Diffstat (limited to 'Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c')
-rw-r--r--  Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c | 43
1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c b/Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c
index d8e7987c5c2..8638d9edbfd 100644
--- a/Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c
+++ b/Build/source/libs/luajit/LuaJIT-src/src/lj_snap.c
@@ -1,6 +1,6 @@
/*
** Snapshot handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2016 Mike Pall. See Copyright Notice in luajit.h
*/
#define lj_snap_c
@@ -26,9 +26,6 @@
#include "lj_cdata.h"
#endif
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-
/* Pass IR on to next optimization in chain (FOLD). */
#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
@@ -73,7 +70,7 @@ static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
IRRef ref = tref_ref(tr);
if (ref) {
SnapEntry sn = SNAP_TR(s, tr);
- IRIns *ir = IR(ref);
+ IRIns *ir = &J->cur.ir[ref];
if (!(sn & (SNAP_CONT|SNAP_FRAME)) &&
ir->o == IR_SLOAD && ir->op1 == s && ref > retf) {
/* No need to snapshot unmodified non-inherited slots. */
@@ -148,8 +145,8 @@ void lj_snap_add(jit_State *J)
MSize nsnap = J->cur.nsnap;
MSize nsnapmap = J->cur.nsnapmap;
/* Merge if no ins. inbetween or if requested and no guard inbetween. */
- if (J->mergesnap ? !irt_isguard(J->guardemit) :
- (nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins)) {
+ if ((nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins) ||
+ (J->mergesnap && !irt_isguard(J->guardemit))) {
if (nsnap == 1) { /* But preserve snap #0 PC. */
emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
goto nomerge;
@@ -407,24 +404,24 @@ static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax,
}
/* Check whether a sunk store corresponds to an allocation. Slow path. */
-static int snap_sunk_store2(jit_State *J, IRIns *ira, IRIns *irs)
+static int snap_sunk_store2(GCtrace *T, IRIns *ira, IRIns *irs)
{
if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
- IRIns *irk = IR(irs->op1);
+ IRIns *irk = &T->ir[irs->op1];
if (irk->o == IR_AREF || irk->o == IR_HREFK)
- irk = IR(irk->op1);
- return (IR(irk->op1) == ira);
+ irk = &T->ir[irk->op1];
+ return (&T->ir[irk->op1] == ira);
}
return 0;
}
/* Check whether a sunk store corresponds to an allocation. Fast path. */
-static LJ_AINLINE int snap_sunk_store(jit_State *J, IRIns *ira, IRIns *irs)
+static LJ_AINLINE int snap_sunk_store(GCtrace *T, IRIns *ira, IRIns *irs)
{
if (irs->s != 255)
return (ira + irs->s == irs); /* Fast check. */
- return snap_sunk_store2(J, ira, irs);
+ return snap_sunk_store2(T, ira, irs);
}
/* Replay snapshot state to setup side trace. */
@@ -487,7 +484,7 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
} else {
IRIns *irs;
for (irs = ir+1; irs < irlast; irs++)
- if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+ if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) &&
@@ -521,13 +518,13 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
op2 = emitir_raw(IRT(IR_HIOP, IRT_I64), op2,
snap_pref(J, T, map, nent, seen, (ir+1)->op2));
}
- J->slot[snap_slot(sn)] = emitir(ir->ot, op1, op2);
+ J->slot[snap_slot(sn)] = emitir(ir->ot & ~(IRT_MARK|IRT_ISPHI), op1, op2);
} else {
IRIns *irs;
TRef tr = emitir(ir->ot, op1, op2);
J->slot[snap_slot(sn)] = tr;
for (irs = ir+1; irs < irlast; irs++)
- if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+ if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
IRIns *irr = &T->ir[irs->op1];
TRef val, key = irr->op2, tmp = tr;
if (irr->o != IR_FREF) {
@@ -631,8 +628,8 @@ static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex,
} else if (irt_isnum(t)) {
setnumV(o, ex->fpr[r-RID_MIN_FPR]);
#endif
- } else if (LJ_64 && irt_islightud(t)) {
- /* 64 bit lightuserdata which may escape already has the tag bits. */
+ } else if (LJ_64 && irt_is64(t)) {
+ /* 64 bit values that already have the tag bits. */
o->u64 = ex->gpr[r-RID_MIN_GPR];
} else if (irt_ispri(t)) {
setpriV(o, irt_toitype(t));
@@ -714,8 +711,9 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
if (ir->o == IR_CNEW || ir->o == IR_CNEWI) {
CTState *cts = ctype_cts(J->L);
CTypeID id = (CTypeID)T->ir[ir->op1].i;
- CTSize sz = lj_ctype_size(cts, id);
- GCcdata *cd = lj_cdata_new(cts, id, sz);
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ GCcdata *cd = lj_cdata_newx(cts, id, sz, info);
setcdataV(J->L, o, cd);
if (ir->o == IR_CNEWI) {
uint8_t *p = (uint8_t *)cdataptr(cd);
@@ -729,7 +727,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
} else {
IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
for (irs = ir+1; irs < irlast; irs++)
- if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+ if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
IRIns *iro = &T->ir[T->ir[irs->op1].op2];
uint8_t *p = (uint8_t *)cd;
CTSize szs;
@@ -762,7 +760,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
settabV(J->L, o, t);
irlast = &T->ir[T->snap[snapno].ref];
for (irs = ir+1; irs < irlast; irs++)
- if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+ if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
IRIns *irk = &T->ir[irs->op1];
TValue tmp, *val;
lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
@@ -863,7 +861,6 @@ const BCIns *lj_snap_restore(jit_State *J, void *exptr)
return pc;
}
-#undef IR
#undef emitir_raw
#undef emitir
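
The lj_snap_add() hunk reorders the snapshot-merge condition. A minimal standalone sketch, not LuaJIT code (the three flags stand in for J->mergesnap, irt_isguard(J->guardemit), and the nsnap/nins "no instructions since the last snapshot" test), enumerates both predicates:

#include <stdio.h>

/* Standalone sketch, not LuaJIT code: the three flags stand in for
** J->mergesnap, irt_isguard(J->guardemit) and the nsnap/nins check. */
static int merge_old(int mergesnap, int guard, int no_ins)
{
  return mergesnap ? !guard : no_ins;
}

static int merge_new(int mergesnap, int guard, int no_ins)
{
  return no_ins || (mergesnap && !guard);
}

int main(void)
{
  int m, g, n;
  printf("mergesnap guard no_ins | old new\n");
  for (m = 0; m <= 1; m++)
    for (g = 0; g <= 1; g++)
      for (n = 0; n <= 1; n++)
        printf("%9d %5d %6d | %3d %3d\n",
               m, g, n, merge_old(m, g, n), merge_new(m, g, n));
  return 0;
}

The two forms agree on every input except mergesnap=1, guard=1, no_ins=1: the rewritten condition also merges when the last snapshot already sits at the current instruction, regardless of the guard flag.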
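
The snap_sunk_store()/snap_sunk_store2() hunks switch the slow path from J->cur.ir to T->ir, so the operand walk indexes the IR of the trace T being replayed rather than the trace under construction. The one-line fast path is unchanged; the sketch below illustrates the idiom it relies on, using an invented Ins type and a one-byte back-offset field where 255 means "not cached" (an assumption mirroring the irs->s != 255 test in the diff):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the "fast check" idiom in snap_sunk_store(): a store
** instruction caches its distance back to the owning allocation in a
** small byte field, so the common case is one pointer comparison
** instead of re-walking operands. Not LuaJIT code. */
typedef struct Ins {
  uint8_t s;  /* Distance from the allocation ins, 255 if not cached. */
} Ins;

static int sunk_store_fast(const Ins *ira, const Ins *irs)
{
  if (irs->s != 255)
    return (ira + irs->s == irs);  /* Fast path: offset matches. */
  return -1;  /* Would fall back to the slow operand walk. */
}

int main(void)
{
  Ins buf[4] = { {0}, {1}, {2}, {255} };
  printf("%d %d %d\n",
         sunk_store_fast(&buf[0], &buf[1]),   /* 1: offset 1 matches */
         sunk_store_fast(&buf[0], &buf[2]),   /* 1: offset 2 matches */
         sunk_store_fast(&buf[0], &buf[3]));  /* -1: slow path needed */
  return 0;
}

Compiled with any C compiler, this prints 1 1 -1 for the three probes: two cached hits and one fall-through to the slow path.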