/*
** Trace management.
** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
*/
#define lj_trace_c
#define LUA_CORE
#include "lj_obj.h"
#if LJ_HASJIT
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_debug.h"
#include "lj_str.h"
#include "lj_frame.h"
#include "lj_state.h"
#include "lj_bc.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_gdbjit.h"
#include "lj_record.h"
#include "lj_asm.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_vmevent.h"
#include "lj_target.h"
/* -- Error handling ------------------------------------------------------ */
/* Synchronous abort with error message. */
void lj_trace_err(jit_State *J, TraceError e)
{
setnilV(&J->errinfo); /* No error info. */
setintV(J->L->top++, (int32_t)e);
lj_err_throw(J->L, LUA_ERRRUN);
}
/* Synchronous abort with error message and error info. */
void lj_trace_err_info(jit_State *J, TraceError e)
{
setintV(J->L->top++, (int32_t)e);
lj_err_throw(J->L, LUA_ERRRUN);
}
/* -- Trace management ---------------------------------------------------- */
/* The current trace is first assembled in J->cur. The variable length
** arrays point to shared, growable buffers (J->irbuf etc.). When trace
** recording ends successfully, the current trace and its data structures
** are copied to a new (compact) GCtrace object.
*/
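/* Layout of the compacted GCtrace object created by trace_save() below:
**
**   [ GCtrace header | IRIns[nins-nk] | SnapShot[nsnap] | SnapEntry[nsnapmap] ]
**
** The header is 8-byte aligned; the IR vector holds the constants
** (references below REF_BIAS) followed by the instructions (REF_BASE up).
*/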
/* Find a free trace number. */
static TraceNo trace_findfree(jit_State *J)
{
MSize osz, lim;
if (J->freetrace == 0)
J->freetrace = 1;
for (; J->freetrace < J->sizetrace; J->freetrace++)
if (traceref(J, J->freetrace) == NULL)
return J->freetrace++;
/* Need to grow trace array. */
lim = (MSize)J->param[JIT_P_maxtrace] + 1;
if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535;
osz = J->sizetrace;
if (osz >= lim)
return 0; /* Too many traces. */
lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef);
for (; osz < J->sizetrace; osz++)
setgcrefnull(J->trace[osz]);
return J->freetrace;
}
#define TRACE_APPENDVEC(field, szfield, tp) \
T->field = (tp *)p; \
memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \
p += J->cur.szfield*sizeof(tp);
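/* For example, TRACE_APPENDVEC(snap, nsnap, SnapShot) expands to:
**   T->snap = (SnapShot *)p;
**   memcpy(p, J->cur.snap, J->cur.nsnap*sizeof(SnapShot));
**   p += J->cur.nsnap*sizeof(SnapShot);
*/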
#ifdef LUAJIT_USE_PERFTOOLS
/*
** Create symbol table of JIT-compiled code. For use with Linux perf tools.
** Example usage:
** perf record -f -e cycles luajit test.lua
** perf report -s symbol
** rm perf.data /tmp/perf-*.map
*/
#include <stdio.h>
#include <unistd.h>
static void perftools_addtrace(GCtrace *T)
{
static FILE *fp;
GCproto *pt = &gcref(T->startpt)->pt;
const BCIns *startpc = mref(T->startpc, const BCIns);
const char *name = proto_chunknamestr(pt);
BCLine lineno;
if (name[0] == '@' || name[0] == '=')
name++;
else
name = "(string)";
lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc);
lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
if (!fp) {
char fname[40];
sprintf(fname, "/tmp/perf-%d.map", getpid());
if (!(fp = fopen(fname, "w"))) return;
setlinebuf(fp);
}
fprintf(fp, "%lx %x TRACE_%d::%s:%u\n",
(long)T->mcode, T->szmcode, T->traceno, name, lineno);
}
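/* A resulting map line might look like this (address and size vary):
**   7f0e4c2b0000 1a3 TRACE_1::test.lua:5
*/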
#endif
/* Save current trace by copying and compacting it. */
static void trace_save(jit_State *J)
{
size_t sztr = ((sizeof(GCtrace)+7)&~7);
size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns);
size_t sz = sztr + szins +
J->cur.nsnap*sizeof(SnapShot) +
J->cur.nsnapmap*sizeof(SnapEntry);
GCtrace *T = lj_mem_newt(J->L, (MSize)sz, GCtrace);
char *p = (char *)T + sztr;
memcpy(T, &J->cur, sizeof(GCtrace));
setgcrefr(T->nextgc, J2G(J)->gc.root);
setgcrefp(J2G(J)->gc.root, T);
newwhite(J2G(J), T);
T->gct = ~LJ_TTRACE;
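/* Bias the IR pointer: references run from nk (lowest constant) up to nins,
** so T->ir[ref] indexes correctly for both constants and instructions. */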
T->ir = (IRIns *)p - J->cur.nk;
memcpy(p, J->cur.ir+J->cur.nk, szins);
p += szins;
TRACE_APPENDVEC(snap, nsnap, SnapShot)
TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry)
J->cur.traceno = 0;
setgcrefp(J->trace[T->traceno], T);
lj_gc_barriertrace(J2G(J), T->traceno);
lj_gdbjit_addtrace(J, T);
#ifdef LUAJIT_USE_PERFTOOLS
perftools_addtrace(T);
#endif
}
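/* A minimal sketch (excluded from the build) of walking the IR of a saved
** trace, relying only on the invariants above. trace_dump_ir() is a
** hypothetical helper, not part of LuaJIT; printing the numeric opcode
** avoids assuming any opcode name table.
*/
#if 0
#include <stdio.h>
static void trace_dump_ir(jit_State *J, TraceNo traceno)
{
  GCtrace *T = traceref(J, traceno);
  IRRef ref;
  for (ref = T->nk; ref < T->nins; ref++) {  /* Constants, then instructions. */
    IRIns *ir = &T->ir[ref];  /* Valid because T->ir is biased by nk. */
    printf("%d op=%d\n", (int)ref - REF_BIAS, (int)ir->o);
  }
}
#endif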
void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T)
{
jit_State *J = G2J(g);
if (T->traceno) {
lj_gdbjit_deltrace(J, T);
if (T->traceno < J->freetrace)
J->freetrace = T->traceno;
setgcrefnull(J->trace[T->traceno]);
}
lj_mem_free(g, T,
((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry));
}
/* Re-enable compiling a prototype by unpatching any modified bytecode. */
void lj_trace_reenableproto(GCproto *pt)
{
if ((pt->flags & PROTO_ILOOP)) {
BCIns *bc = proto_bc(pt);
BCPos i, sizebc = pt->sizebc;
pt->flags &= ~PROTO_ILOOP;
if (bc_op(bc[0]) == BC_IFUNCF)
setbc_op(&bc[0], BC_FUNCF);
for (i = 1; i < sizebc; i++) {
BCOp op = bc_op(bc[i]);
if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP)
setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP);
}
}
}
/* Unpatch the bytecode modified by a root trace. */
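/* Depending on what the starting instruction was patched to, this restores:
**   BC_JFORL  -> the original FORL, plus the preceding JFORI -> FORI
**   BC_JITERL/BC_JLOOP -> the original ITERL/LOOP or return instruction
**   BC_JMP    -> an ITERL start: follow the jump to find the JITERL
**   BC_JFUNCF -> the original FUNCF
*/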
static void trace_unpatch(jit_State *J, GCtrace *T)
{
BCOp op = bc_op(T->startins);
BCIns *pc = mref(T->startpc, BCIns);
UNUSED(J);
if (op == BC_JMP)
return; /* No need to unpatch branches in parent traces (yet). */
switch (bc_op(*pc)) {
case BC_JFORL:
lua_assert(traceref(J, bc_d(*pc)) == T);
*pc = T->startins;
pc += bc_j(T->startins);
lua_assert(bc_op(*pc) == BC_JFORI);
setbc_op(pc, BC_FORI);
break;
case BC_JITERL:
case BC_JLOOP:
lua_assert(op == BC_ITERL || op == BC_LOOP || bc_isret(op));
*pc = T->startins;
break;
case BC_JMP:
lua_assert(op == BC_ITERL);
pc += bc_j(*pc)+2;
if (bc_op(*pc) == BC_JITERL) {
lua_assert(traceref(J, bc_d(*pc)) == T);
*pc = T->startins;
}
break;
case BC_JFUNCF:
lua_assert(op == BC_FUNCF);
*pc = T->startins;
break;
default: /* Already unpatched. */
break;
}
}
/* Flush a root trace. */
static void trace_flushroot(jit_State *J, GCtrace *T)
{
GCproto *pt = &gcref(T->startpt)->pt;
lua_assert(T->root == 0 && pt != NULL);
/* First unpatch any modified bytecode. */
trace_unpatch(J, T);
/* Unlink root trace from chain anchored in prototype. */
if (pt->trace == T->traceno) { /* Trace is first in chain. Easy. */
pt->trace = T->nextroot;
} else if (pt->trace) { /* Otherwise search in chain of root traces. */
GCtrace *T2 = traceref(J, pt->trace);
if (T2) {
for (; T2->nextroot; T2 = traceref(J, T2->nextroot))
if (T2->nextroot == T->traceno) {
T2->nextroot = T->nextroot; /* Unlink from chain. */
break;
}
}
}
}
/* Flush a trace. Only root traces are considered. */
void lj_trace_flush(jit_State *J, TraceNo traceno)
{
if (traceno > 0 && traceno < J->sizetrace) {
GCtrace *T = traceref(J, traceno);
if (T && T->root == 0)
trace_flushroot(J, T);
}
}
/* Flush all traces associated with a prototype. */
void lj_trace_flushproto(global_State *g, GCproto *pt)
{
while (pt->trace != 0)
trace_flushroot(G2J(g), traceref(G2J(g), pt->trace));
}
/* Flush all traces. */
int lj_trace_flushall(lua_State *L)
{
jit_State *J = L2J(L);
ptrdiff_t i;
if ((J2G(J)->hookmask & HOOK_GC))
return 1;
for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) {
GCtrace *T = traceref(J, i);
if (T) {
if (T->root == 0)
trace_flushroot(J, T);
lj_gdbjit_deltrace(J, T);
T->traceno = 0;
setgcrefnull(J->trace[i]);
}
}
J->cur.traceno = 0;
J->freetrace = 0;
/* Clear penalty cache. */
memset(J->penalty, 0, sizeof(J->penalty));
/* Free all machine code and invalidate all exit stub groups. */
lj_mcode_free(J);
memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
lj_vmevent_send(L, TRACE,
setstrV(L, L->top++, lj_str_newlit(L, "flush"));
);
return 0;
}
/* Initialize JIT compiler state. */
void lj_trace_initstate(global_State *g)
{
jit_State *J = G2J(g);
TValue *tv;
/* Initialize SIMD constants. */
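/* The 0x7fff... mask clears the IEEE-754 sign bit (used for abs);
** the 0x8000... mask flips it via XOR (used for negation). */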
tv = LJ_KSIMD(J, LJ_KSIMD_ABS);
tv[0].u64 = U64x(7fffffff,ffffffff);
tv[1].u64 = U64x(7fffffff,ffffffff);
tv = LJ_KSIMD(J, LJ_KSIMD_NEG);
tv[0].u64 = U64x(80000000,00000000);
tv[1].u64 = U64x(80000000,00000000);
}
/* Free everything associated with the JIT compiler state. */
void lj_trace_freestate(global_State *g)
{
jit_State *J = G2J(g);
#ifdef LUA_USE_ASSERT
{ /* This assumes all traces have already been freed. */
ptrdiff_t i;
for (i = 1; i < (ptrdiff_t)J->sizetrace; i++)
lua_assert(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL);
}
#endif
lj_mcode_free(J);
lj_ir_k64_freeall(J);
lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
lj_mem_freevec(g, J->trace, J->sizetrace, GCRef);
}
/* -- Penalties and blacklisting ------------------------------------------ */
/* Blacklist a bytecode instruction. */
static void blacklist_pc(GCproto *pt, BCIns *pc)
{
setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP);
pt->flags |= PROTO_ILOOP;
}
/* Penalize a bytecode instruction. */
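/* Each abort at the same starting PC roughly doubles the penalty and adds
** a few random bits to decorrelate repeated aborts. Once the value would
** exceed PENALTY_MAX, the instruction is blacklisted instead and no longer
** triggers hotcount events (until the bytecode is unpatched again).
*/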
static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e)
{
uint32_t i, val = PENALTY_MIN;
for (i = 0; i < PENALTY_SLOTS; i++)
if (mref(J->penalty[i].pc, const BCIns) == pc) { /* Cache slot found? */
/* First try to bump its hotcount several times. */
val = ((uint32_t)J->penalty[i].val << 1) +
LJ_PRNG_BITS(J, PENALTY_RNDBITS);
if (val > PENALTY_MAX) {
blacklist_pc(pt, pc); /* Blacklist it, if that didn't help. */
return;
}
goto setpenalty;
}
/* Assign a new penalty cache slot. */
i = J->penaltyslot;
J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1);
setmref(J->penalty[i].pc, pc);
setpenalty:
J->penalty[i].val = (uint16_t)val;
J->penalty[i].reason = e;
hotcount_set(J2GG(J), pc+1, val);
}
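/* A minimal sketch (excluded from the build) of the resulting backoff,
** mirroring the doubling logic above; penalty_backoff_steps() is a
** hypothetical helper for illustration only.
*/
#if 0
static int penalty_backoff_steps(jit_State *J)
{
  uint32_t val = PENALTY_MIN;
  int aborts = 1;  /* The first abort seeds the slot with PENALTY_MIN. */
  while (val <= PENALTY_MAX) {  /* Same limit test as penalty_pc(). */
    val = (val << 1) + LJ_PRNG_BITS(J, PENALTY_RNDBITS);
    aborts++;
  }
  return aborts;  /* ~log2(PENALTY_MAX/PENALTY_MIN) aborts until blacklisting. */
}
#endif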
/* -- Trace compiler state machine ---------------------------------------- */
/* Start tracing. */
static void trace_start(jit_State *J)
{
lua_State *L;
TraceNo traceno;
if ((J->pt->flags & PROTO_NOJIT)) { /* JIT disabled for this proto? */
if (J->parent == 0) {
/* Lazy bytecode patching to disable hotcount events. */
lua_assert(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF);
setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
J->pt->flags |= PROTO_ILOOP;
}
J->state = LJ_TRACE_IDLE; /* Silently ignored. */
return;
}
/* Get a new trace number. */
traceno = trace_findfree(J);
if (LJ_UNLIKELY(traceno == 0)) { /* No free trace? */
lua_assert((J2G(J)->hookmask & HOOK_GC) == 0);
lj_trace_flushall(J->L);
J->state = LJ_TRACE_IDLE; /* Silently ignored. */
return;
}
setgcrefp(J->trace[traceno], &J->cur);
/* Setup enough of the current trace to be able to send the vmevent. */
memset(&J->cur, 0, sizeof(GCtrace));
J->cur.traceno = traceno;
J->cur.nins = J->cur.nk = REF_BASE;
J->cur.ir = J->irbuf;
J->cur.snap = J->snapbuf;
J->cur.snapmap = J->snapmapbuf;
J->mergesnap = 0;
J->needsnap = 0;
J->bcskip = 0;
J->guardemit.irt = 0;
J->postproc = LJ_POST_NONE;
lj_resetsplit(J);
setgcref(J->cur.startpt, obj2gco(J->pt));
L = J->L;
lj_vmevent_send(L, TRACE,
setstrV(L, L->top++, lj_str_newlit(L, "start"));
setintV(L->top++, traceno);
setfuncV(L, L->top++, J->fn);
setintV(L->top++, proto_bcpos(J->pt, J->pc));
if (J->parent) {
setintV(L->top++, J->parent);
setintV(L->top++, J->exitno);
}
);
lj_record_setup(J);
}
/* Stop tracing. */
static void trace_stop(jit_State *J)
{
BCIns *pc = mref(J->cur.startpc, BCIns);
BCOp op = bc_op(J->cur.startins);
GCproto *pt = &gcref(J->cur.startpt)->pt;
TraceNo traceno = J->cur.traceno;
lua_State *L;
switch (op) {
case BC_FORL:
setbc_op(pc+bc_j(J->cur.startins), BC_JFORI); /* Patch FORI, too. */
/* fallthrough */
case BC_LOOP:
case BC_ITERL:
case BC_FUNCF:
/* Patch bytecode of starting instruction in root trace. */
setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
setbc_d(pc, traceno);
addroot:
/* Add to root trace chain in prototype. */
J->cur.nextroot = pt->trace;
pt->trace = (TraceNo1)traceno;
break;
case BC_RET:
case BC_RET0:
case BC_RET1:
*pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno);
goto addroot;
case BC_JMP:
/* Patch exit branch in parent to side trace entry. */
lua_assert(J->parent != 0 && J->cur.root != 0);
lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode);
/* Avoid compiling a side trace twice (stack resizing uses parent exit). */
traceref(J, J->parent)->snap[J->exitno].count = SNAPCOUNT_DONE;
/* Add to side trace chain in root trace. */
{
GCtrace *root = traceref(J, J->cur.root);
root->nchild++;
J->cur.nextside = root->nextside;
root->nextside = (TraceNo1)traceno;
}
break;
default:
lua_assert(0);
break;
}
/* Commit new mcode only after all patching is done. */
lj_mcode_commit(J, J->cur.mcode);
J->postproc = LJ_POST_NONE;
trace_save(J);
L = J->L;
lj_vmevent_send(L, TRACE,
setstrV(L, L->top++, lj_str_newlit(L, "stop"));
setintV(L->top++, traceno);
);
}
/* Start a new root trace for down-recursion. */
static int trace_downrec(jit_State *J)
{
/* Restart recording at the return instruction. */
lua_assert(J->pt != NULL);
lua_assert(bc_isret(bc_op(*J->pc)));
if (bc_op(*J->pc) == BC_RETM)
return 0; /* NYI: down-recursion with RETM. */
J->parent = 0;
J->exitno = 0;
J->state = LJ_TRACE_RECORD;
trace_start(J);
return 1;
}
/* Abort tracing. */
static int trace_abort(jit_State *J)
{
lua_State *L = J->L;
TraceError e = LJ_TRERR_RECERR;
TraceNo traceno;
J->postproc = LJ_POST_NONE;
lj_mcode_abort(J);
if (tvisnumber(L->top-1))
e = (TraceError)numberVint(L->top-1);
if (e == LJ_TRERR_MCODELM) {
L->top--; /* Remove error object */
J->state = LJ_TRACE_ASM;
return 1; /* Retry ASM with new MCode area. */
}
/* Penalize or blacklist starting bytecode instruction. */
if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins)))
penalty_pc(J, &gcref(J->cur.startpt)->pt, mref(J->cur.startpc, BCIns), e);
/* Is there anything to abort? */
traceno = J->cur.traceno;
if (traceno) {
ptrdiff_t errobj = savestack(L, L->top-1); /* Stack may be resized. */
J->cur.link = 0;
J->cur.linktype = LJ_TRLINK_NONE;
lj_vmevent_send(L, TRACE,
TValue *frame;
const BCIns *pc;
GCfunc *fn;
setstrV(L, L->top++, lj_str_newlit(L, "abort"));
setintV(L->top++, traceno);
/* Find original Lua function call to generate a better error message. */
frame = J->L->base-1;
pc = J->pc;
while (!isluafunc(frame_func(frame))) {
pc = (frame_iscont(frame) ? frame_contpc(frame) : frame_pc(frame)) - 1;
frame = frame_prev(frame);
}
fn = frame_func(frame);
setfuncV(L, L->top++, fn);
setintV(L->top++, proto_bcpos(funcproto(fn), pc));
copyTV(L, L->top++, restorestack(L, errobj));
copyTV(L, L->top++, &J->errinfo);
);
/* Drop aborted trace after the vmevent (which may still access it). */
setgcrefnull(J->trace[traceno]);
if (traceno < J->freetrace)
J->freetrace = traceno;
J->cur.traceno = 0;
}
L->top--; /* Remove error object */
if (e == LJ_TRERR_DOWNREC)
return trace_downrec(J);
else if (e == LJ_TRERR_MCODEAL)
lj_trace_flushall(L);
return 0;
}
/* Perform pending re-patch of a bytecode instruction. */
static LJ_AINLINE void trace_pendpatch(jit_State *J, int force)
{
if (LJ_UNLIKELY(J->patchpc)) {
if (force || J->bcskip == 0) {
*J->patchpc = J->patchins;
J->patchpc = NULL;
} else {
J->bcskip = 0;
}
}
}
/* State machine for the trace compiler. Protected callback. */
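/* Normal flow: START -> RECORD (one bytecode per call) -> END -> ASM -> IDLE.
** On abort: any state -> ERR -> trace_abort(), which either returns to IDLE,
** retries assembly (LJ_TRERR_MCODELM) or restarts recording (down-recursion).
*/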
static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
{
jit_State *J = (jit_State *)ud;
UNUSED(dummy);
do {
retry:
switch (J->state) {
case LJ_TRACE_START:
J->state = LJ_TRACE_RECORD; /* trace_start() may change state. */
trace_start(J);
lj_dispatch_update(J2G(J));
break;
case LJ_TRACE_RECORD:
trace_pendpatch(J, 0);
setvmstate(J2G(J), RECORD);
lj_vmevent_send_(L, RECORD,
/* Save/restore tmptv state for trace recorder. */
TValue savetv = J2G(J)->tmptv;
TValue savetv2 = J2G(J)->tmptv2;
setintV(L->top++, J->cur.traceno);
setfuncV(L, L->top++, J->fn);
setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1);
setintV(L->top++, J->framedepth);
,
J2G(J)->tmptv = savetv;
J2G(J)->tmptv2 = savetv2;
);
lj_record_ins(J);
break;
case LJ_TRACE_END:
trace_pendpatch(J, 1);
J->loopref = 0;
if ((J->flags & JIT_F_OPT_LOOP) &&
J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) {
setvmstate(J2G(J), OPT);
lj_opt_dce(J);
if (lj_opt_loop(J)) { /* Loop optimization failed? */
J->cur.link = 0;
J->cur.linktype = LJ_TRLINK_NONE;
J->loopref = J->cur.nins;
J->state = LJ_TRACE_RECORD; /* Try to continue recording. */
break;
}
J->loopref = J->chain[IR_LOOP]; /* Needed by assembler. */
}
lj_opt_split(J);
lj_opt_sink(J);
if (!J->loopref) J->cur.snap[J->cur.nsnap-1].count = SNAPCOUNT_DONE;
J->state = LJ_TRACE_ASM;
break;
case LJ_TRACE_ASM:
setvmstate(J2G(J), ASM);
lj_asm_trace(J, &J->cur);
trace_stop(J);
setvmstate(J2G(J), INTERP);
J->state = LJ_TRACE_IDLE;
lj_dispatch_update(J2G(J));
return NULL;
default: /* Trace aborted asynchronously. */
setintV(L->top++, (int32_t)LJ_TRERR_RECERR);
/* fallthrough */
case LJ_TRACE_ERR:
trace_pendpatch(J, 1);
if (trace_abort(J))
goto retry;
setvmstate(J2G(J), INTERP);
J->state = LJ_TRACE_IDLE;
lj_dispatch_update(J2G(J));
return NULL;
}
} while (J->state > LJ_TRACE_RECORD);
return NULL;
}
/* -- Event handling ------------------------------------------------------ */
/* A bytecode instruction is about to be executed. Record it. */
void lj_trace_ins(jit_State *J, const BCIns *pc)
{
/* Note: J->L must already be set. pc is the true bytecode PC here. */
J->pc = pc;
J->fn = curr_func(J->L);
J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL;
while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0)
J->state = LJ_TRACE_ERR;
}
/* A hotcount triggered. Start recording a root trace. */
void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
{
/* Note: pc is the interpreter bytecode PC here. It's offset by 1. */
ERRNO_SAVE
/* Reset hotcount. */
hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]*HOTCOUNT_LOOP);
/* Only start a new trace if not already recording, and not inside a __gc call or vmevent handler. */
if (J->state == LJ_TRACE_IDLE &&
!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
J->parent = 0; /* Root trace. */
J->exitno = 0;
J->state = LJ_TRACE_START;
lj_trace_ins(J, pc-1);
}
ERRNO_RESTORE
}
/* Check for a hot side exit. If yes, start recording a side trace. */
static void trace_hotside(jit_State *J, const BCIns *pc)
{
SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
snap->count != SNAPCOUNT_DONE &&
++snap->count >= J->param[JIT_P_hotexit]) {
lua_assert(J->state == LJ_TRACE_IDLE);
/* J->parent is non-zero for a side trace. */
J->state = LJ_TRACE_START;
lj_trace_ins(J, pc);
}
}
/* Tiny struct to pass data to protected call. */
typedef struct ExitDataCP {
jit_State *J;
void *exptr; /* Pointer to exit state. */
const BCIns *pc; /* Restart interpreter at this PC. */
} ExitDataCP;
/* Need to protect lj_snap_restore because it may throw. */
static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud)
{
ExitDataCP *exd = (ExitDataCP *)ud;
cframe_errfunc(L->cframe) = -1; /* Inherit error function. */
exd->pc = lj_snap_restore(exd->J, exd->exptr);
UNUSED(dummy);
return NULL;
}
#ifndef LUAJIT_DISABLE_VMEVENT
/* Push all registers from exit state. */
static void trace_exit_regs(lua_State *L, ExitState *ex)
{
int32_t i;
setintV(L->top++, RID_NUM_GPR);
setintV(L->top++, RID_NUM_FPR);
for (i = 0; i < RID_NUM_GPR; i++) {
if (sizeof(ex->gpr[i]) == sizeof(int32_t))
setintV(L->top++, (int32_t)ex->gpr[i]);
else
setnumV(L->top++, (lua_Number)ex->gpr[i]);
}
#if !LJ_SOFTFP
for (i = 0; i < RID_NUM_FPR; i++) {
setnumV(L->top, ex->fpr[i]);
if (LJ_UNLIKELY(tvisnan(L->top)))
setnanV(L->top);
L->top++;
}
#endif
}
#endif
#ifdef EXITSTATE_PCREG
/* Determine trace number from pc of exit instruction. */
static TraceNo trace_exit_find(jit_State *J, MCode *pc)
{
TraceNo traceno;
for (traceno = 1; traceno < J->sizetrace; traceno++) {
GCtrace *T = traceref(J, traceno);
if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode))
return traceno;
}
lua_assert(0);
return 0;
}
#endif
/* A trace exited. Restore interpreter state. */
int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
{
ERRNO_SAVE
lua_State *L = J->L;
ExitState *ex = (ExitState *)exptr;
ExitDataCP exd;
int errcode;
const BCIns *pc;
void *cf;
GCtrace *T;
#ifdef EXITSTATE_PCREG
J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]);
#endif
T = traceref(J, J->parent); UNUSED(T);
#ifdef EXITSTATE_CHECKEXIT
if (J->exitno == T->nsnap) { /* Treat stack check like a parent exit. */
lua_assert(T->root != 0);
J->exitno = T->ir[REF_BASE].op2;
J->parent = T->ir[REF_BASE].op1;
T = traceref(J, J->parent);
}
#endif
lua_assert(T != NULL && J->exitno < T->nsnap);
exd.J = J;
exd.exptr = exptr;
errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
if (errcode)
return -errcode; /* Return negated error code. */
lj_vmevent_send(L, TEXIT,
lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
setintV(L->top++, J->parent);
setintV(L->top++, J->exitno);
trace_exit_regs(L, ex);
);
pc = exd.pc;
cf = cframe_raw(L->cframe);
setcframe_pc(cf, pc);
if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
if (!(G(L)->hookmask & HOOK_GC))
lj_gc_step(L); /* Exited because of GC: drive GC forward. */
} else {
trace_hotside(J, pc);
}
if (bc_op(*pc) == BC_JLOOP) {
BCIns *retpc = &traceref(J, bc_d(*pc))->startins;
if (bc_isret(bc_op(*retpc))) {
if (J->state == LJ_TRACE_RECORD) {
J->patchins = *pc;
J->patchpc = (BCIns *)pc;
*J->patchpc = *retpc;
J->bcskip = 1;
} else {
pc = retpc;
setcframe_pc(cf, pc);
}
}
}
/* Return MULTRES or 0. */
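/* The interpreter needs MULTRES (the number of multiple results or extra
** arguments) to resume at these bytecodes, so recompute it from the stack
** layout for the VM's exit handler.
*/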
ERRNO_RESTORE
switch (bc_op(*pc)) {
case BC_CALLM: case BC_CALLMT:
return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc));
case BC_RETM:
return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc));
case BC_TSETM:
return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc));
default:
if (bc_op(*pc) >= BC_FUNCF)
return (int)((BCReg)(L->top - L->base) + 1);
return 0;
}
}
#endif