
/Python/eval.cc

http://unladen-swallow.googlecode.com/
C++ | 5498 lines | 4329 code | 450 blank | 719 comment | 945 complexity

Large files are truncated; the full file is available at the project URL above.

   1
   2/* Execute compiled code */
   3
   4/* XXX TO DO:
   5   XXX speed up searching for keywords by using a dictionary
   6   XXX document it!
   7   */
   8
   9/* Note: this file will be compiled as C ifndef WITH_LLVM, so try to keep it
  10   generally C. */
  11
  12/* enable more aggressive intra-module optimizations, where available */
  13#define PY_LOCAL_AGGRESSIVE
  14
  15#include "Python.h"
  16
  17#include "code.h"
  18#include "frameobject.h"
  19#include "eval.h"
  20#include "opcode.h"
  21#include "structmember.h"
  22
  23#include "JIT/llvm_compile.h"
  24#include "Util/EventTimer.h"
  25#include <ctype.h>
  26
  27#ifdef WITH_LLVM
  28#include "_llvmfunctionobject.h"
  29#include "llvm/Function.h"
  30#include "llvm/Support/ManagedStatic.h"
  31#include "llvm/Support/raw_ostream.h"
  32#include "JIT/global_llvm_data.h"
  33#include "JIT/RuntimeFeedback.h"
  34#include "Util/Stats.h"
  35
  36#include <set>
  37
  38using llvm::errs;
  39#endif
  40
  41
  42/* Make a call to stop the call overhead timer before going through to
  43   PyObject_Call. */
  44static inline PyObject *
  45_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw)
  46{
  47	/* If we're calling a compiled C function with *args or **kwargs, then
  48	 * this enum should be CALL_ENTER_C.  However, most calls to C
  49	 * functions are simple and are fast-tracked through the CALL_FUNCTION
  50	 * opcode. */
  51	PY_LOG_TSC_EVENT(CALL_ENTER_PYOBJ_CALL);
  52	return PyObject_Call(func, arg, kw);
  53}
  54
  55
  56#ifdef Py_WITH_INSTRUMENTATION
  57std::string
  58_PyEval_GetCodeName(PyCodeObject *code)
  59{
  60	std::string result;
  61	llvm::raw_string_ostream wrapper(result);
  62
  63	wrapper << PyString_AsString(code->co_filename)
  64		<< ":" << code->co_firstlineno << " "
  65		<< "(" << PyString_AsString(code->co_name) << ")";
  66	wrapper.flush();
  67	return result;
  68}
  69
  70// Collect statistics about how long we block for compilation to LLVM IR and to
  71// machine code.
  72class IrCompilationTimes : public DataVectorStats<int64_t> {
  73public:
  74	IrCompilationTimes()
  75		: DataVectorStats<int64_t>("Time blocked for IR JIT in ns") {}
  76};
  77class McCompilationTimes : public DataVectorStats<int64_t> {
  78public:
  79	McCompilationTimes()
  80		: DataVectorStats<int64_t>("Time blocked for MC JIT in ns") {}
  81};
  82
  83static llvm::ManagedStatic<IrCompilationTimes> ir_compilation_times;
  84static llvm::ManagedStatic<McCompilationTimes> mc_compilation_times;
  85
  86class FeedbackMapCounter {
   87public:
	/* Initialize the counter so IncCounter() and the destructor never
	   read an indeterminate value. */
	FeedbackMapCounter() : counter_(0) {}

   88	~FeedbackMapCounter() {
  89		errs() << "\nFeedback maps created:\n";
  90		errs() << "N: " << this->counter_ << "\n";
  91	}
  92
  93	void IncCounter() {
  94		this->counter_++;
  95	}
  96
  97private:
  98	unsigned counter_;
  99};
 100
 101static llvm::ManagedStatic<FeedbackMapCounter> feedback_map_counter;
 102
 103
 104class HotnessTracker {
 105	// llvm::DenseSet or llvm::SmallPtrSet may be better, but as of this
 106	// writing, they don't seem to work with std::vector.
 107	std::set<PyCodeObject*> hot_code_;
 108public:
 109	~HotnessTracker();
 110
 111	void AddHotCode(PyCodeObject *code_obj) {
 112		// This will prevent the code object from ever being
 113		// deleted.
 114		Py_INCREF(code_obj);
 115		this->hot_code_.insert(code_obj);
 116	}
 117};
 118
 119static bool
 120compare_hotness(const PyCodeObject *first, const PyCodeObject *second)
 121{
 122	return first->co_hotness > second->co_hotness;
 123}
 124
 125HotnessTracker::~HotnessTracker()
 126{
 127	errs() << "\nCode objects deemed hot:\n";
 128	errs() << "N: " << this->hot_code_.size() << "\n";
 129	errs() << "Function -> hotness score:\n";
 130	std::vector<PyCodeObject*> to_sort(this->hot_code_.begin(),
 131					   this->hot_code_.end());
 132	std::sort(to_sort.begin(), to_sort.end(), compare_hotness);
 133	for (std::vector<PyCodeObject*>::iterator co = to_sort.begin();
 134	     co != to_sort.end(); ++co) {
 135		errs() << _PyEval_GetCodeName(*co)
 136		       << " -> " << (*co)->co_hotness << "\n";
 137	}
 138}
 139
 140static llvm::ManagedStatic<HotnessTracker> hot_code;
 141
 142
 143// Keep track of which functions failed fatal guards, but kept being called.
 144// This can help gauge the efficacy of optimizations that involve fatal guards.
 145class FatalBailTracker {
 146public:
 147	~FatalBailTracker() {
 148		errs() << "\nCode objects that failed fatal guards:\n";
 149		errs() << "\tfile:line (funcname) bail hotness"
 150		       << " -> final hotness\n";
 151
 152		for (TrackerData::const_iterator it = this->code_.begin();
 153				it != this->code_.end(); ++it) {
 154			PyCodeObject *code = it->first;
 155			if (code->co_hotness == it->second)
 156				continue;
 157			errs() << "\t" << _PyEval_GetCodeName(code)
 158			       << "\t" << it->second << " -> "
 159			       << code->co_hotness << "\n";
 160		}
 161	}
 162
 163	void RecordFatalBail(PyCodeObject *code) {
 164		Py_INCREF(code);
 165		this->code_.push_back(std::make_pair(code, code->co_hotness));
 166	}
 167
 168private:
 169	// Keep a list of (code object, hotness) where hotness is the
 170	// value of co_hotness when RecordFatalBail() was called. This is
 171	// used to hide code objects whose machine code functions are
 172	// invalidated during shutdown because their module dict has gone away;
 173	// these code objects are uninteresting for our analysis.
 174	typedef std::pair<PyCodeObject *, long> DataPoint;
 175	typedef std::vector<DataPoint> TrackerData;
 176
 177	TrackerData code_;
 178};
 179
 180
 181static llvm::ManagedStatic<FatalBailTracker> fatal_bail_tracker;
 182
 183// C wrapper for FatalBailTracker::RecordFatalBail().
 184void
 185_PyEval_RecordFatalBail(PyCodeObject *code)
 186{
 187	fatal_bail_tracker->RecordFatalBail(code);
 188}
 189
 190
  191// Collect stats on how many watchers the globals/builtins dicts accumulate.
  192// This currently records how many watchers the dict had when it changed, i.e.,
 193// how many watchers it had to notify.
 194class WatcherCountStats : public DataVectorStats<size_t> {
 195public:
 196	WatcherCountStats() :
 197		DataVectorStats<size_t>("Number of watchers accumulated") {};
 198};
 199
 200static llvm::ManagedStatic<WatcherCountStats> watcher_count_stats;
 201
 202void
 203_PyEval_RecordWatcherCount(size_t watcher_count)
 204{
 205	watcher_count_stats->RecordDataPoint(watcher_count);
 206}
 207
 208
 209class BailCountStats {
 210public:
 211	BailCountStats() : total_(0), trace_on_entry_(0), line_trace_(0),
 212	                   backedge_trace_(0), call_profile_(0),
 213	                   fatal_guard_fail_(0), guard_fail_(0) {};
 214
 215	~BailCountStats() {
 216		errs() << "\nBailed to the interpreter " << this->total_
 217		       << " times:\n";
 218		errs() << "TRACE_ON_ENTRY: " << this->trace_on_entry_ << "\n";
 219		errs() << "LINE_TRACE: " << this->line_trace_ << "\n";
  220		errs() << "BACKEDGE_TRACE: " << this->backedge_trace_ << "\n";
 221		errs() << "CALL_PROFILE: " << this->call_profile_ << "\n";
 222		errs() << "FATAL_GUARD_FAIL: " << this->fatal_guard_fail_
 223		       << "\n";
 224		errs() << "GUARD_FAIL: " << this->guard_fail_ << "\n";
 225
 226		errs() << "\n" << this->bail_site_freq_.size()
 227		       << " bail sites:\n";
 228		for (BailData::iterator i = this->bail_site_freq_.begin(),
 229		     end = this->bail_site_freq_.end(); i != end; ++i) {
 230			errs() << "    " << i->getKey() << " bailed "
 231			       << i->getValue() << " times\n";
 232		}
 233
 234		errs() << "\n" << this->guard_bail_site_freq_.size()
 235		       << " guard bail sites:\n";
 236		for (BailData::iterator i = this->guard_bail_site_freq_.begin(),
 237		     end = this->guard_bail_site_freq_.end(); i != end; ++i) {
 238			errs() << "    " << i->getKey() << " bailed "
 239			       << i->getValue() << " times\n";
 240		}
 241
 242	}
 243
 244	void RecordBail(PyFrameObject *frame, _PyFrameBailReason bail_reason) {
 245		++this->total_;
 246
 247		std::string record;
 248		llvm::raw_string_ostream wrapper(record);
 249		wrapper << PyString_AsString(frame->f_code->co_filename) << ":";
 250		wrapper << frame->f_code->co_firstlineno << ":";
 251		wrapper << PyString_AsString(frame->f_code->co_name) << ":";
 252		// See the comment in PyEval_EvalFrame about how f->f_lasti is
 253		// initialized.
 254		wrapper << frame->f_lasti + 1;
 255		wrapper.flush();
 256
 257		BailData::value_type &entry =
 258			this->bail_site_freq_.GetOrCreateValue(record, 0);
 259		entry.setValue(entry.getValue() + 1);
 260
 261#define BAIL_CASE(name, field) \
 262	case name: \
 263		++this->field; \
 264		break;
 265
 266		switch (bail_reason) {
 267			BAIL_CASE(_PYFRAME_TRACE_ON_ENTRY, trace_on_entry_)
 268			BAIL_CASE(_PYFRAME_LINE_TRACE, line_trace_)
 269			BAIL_CASE(_PYFRAME_BACKEDGE_TRACE, backedge_trace_)
 270			BAIL_CASE(_PYFRAME_CALL_PROFILE, call_profile_)
 271			BAIL_CASE(_PYFRAME_FATAL_GUARD_FAIL, fatal_guard_fail_)
 272			BAIL_CASE(_PYFRAME_GUARD_FAIL, guard_fail_)
 273			default:
 274				abort();   // Unknown bail reason.
 275		}
 276#undef BAIL_CASE
 277
 278		if (bail_reason != _PYFRAME_GUARD_FAIL)
 279			return;
 280
 281		wrapper << ":";
 282
 283#define GUARD_CASE(name) \
 284	case name: \
 285		wrapper << #name; \
 286		break;
 287
 288		switch (frame->f_guard_type) {
 289			GUARD_CASE(_PYGUARD_DEFAULT)
 290			GUARD_CASE(_PYGUARD_BINOP)
 291			GUARD_CASE(_PYGUARD_ATTR)
 292			GUARD_CASE(_PYGUARD_CFUNC)
 293			GUARD_CASE(_PYGUARD_BRANCH)
 294			GUARD_CASE(_PYGUARD_STORE_SUBSCR)
 295			default:
 296				wrapper << ((int)frame->f_guard_type);
 297		}
 298#undef GUARD_CASE
 299
 300		wrapper.flush();
 301
 302		BailData::value_type &g_entry =
 303			this->guard_bail_site_freq_.GetOrCreateValue(record, 0);
 304		g_entry.setValue(g_entry.getValue() + 1);
 305	}
 306
 307private:
 308	typedef llvm::StringMap<unsigned> BailData;
 309	BailData bail_site_freq_;
 310	BailData guard_bail_site_freq_;
 311
 312	long total_;
 313	long trace_on_entry_;
 314	long line_trace_;
 315	long backedge_trace_;
 316	long call_profile_;
 317	long fatal_guard_fail_;
 318	long guard_fail_;
 319};
 320
 321static llvm::ManagedStatic<BailCountStats> bail_count_stats;
 322#endif  // Py_WITH_INSTRUMENTATION
 323
 324
 325/* Turn this on if your compiler chokes on the big switch: */
 326/* #define CASE_TOO_BIG 1 */
 327
 328#ifdef Py_DEBUG
 329/* For debugging the interpreter: */
 330#define LLTRACE  1	/* Low-level trace feature */
 331#define CHECKEXC 1	/* Double-check exception checking */
 332#endif
 333
 334typedef PyObject *(*callproc)(PyObject *, PyObject *, PyObject *);
 335
 336/* Forward declarations */
 337static PyObject * fast_function(PyObject *, PyObject ***, int, int, int);
 338static PyObject * do_call(PyObject *, PyObject ***, int, int);
 339static PyObject * ext_do_call(PyObject *, PyObject ***, int, int, int);
 340static PyObject * update_keyword_args(PyObject *, int, PyObject ***,
 341				      PyObject *);
 342static PyObject * update_star_args(int, int, PyObject *, PyObject ***);
 343static PyObject * load_args(PyObject ***, int);
 344
 345#ifdef WITH_LLVM
 346static inline void mark_called(PyCodeObject *co);
 347static inline int maybe_compile(PyCodeObject *co, PyFrameObject *f);
 348
 349/* Record data for use in generating optimized machine code. */
 350static void record_type(PyCodeObject *, int, int, int, PyObject *);
 351static void record_func(PyCodeObject *, int, int, int, PyObject *);
 352static void record_object(PyCodeObject *, int, int, int, PyObject *);
 353static void inc_feedback_counter(PyCodeObject *, int, int, int, int);
 354#endif  /* WITH_LLVM */
 355
 356int _Py_ProfilingPossible = 0;
 357
 358/* Keep this in sync with llvm_fbuilder.cc */
 359#define CALL_FLAG_VAR 1
 360#define CALL_FLAG_KW 2
 361
 362#ifdef LLTRACE
 363static int lltrace;
 364static int prtrace(PyObject *, char *);
 365#endif
 366static int call_trace_protected(Py_tracefunc, PyObject *,
 367				 PyFrameObject *, int, PyObject *);
 368static int maybe_call_line_trace(Py_tracefunc, PyObject *,
 369				  PyFrameObject *, int *, int *, int *);
 370
 371static PyObject * cmp_outcome(int, PyObject *, PyObject *);
 372static void format_exc_check_arg(PyObject *, char *, PyObject *);
 373static PyObject * string_concatenate(PyObject *, PyObject *,
 374				    PyFrameObject *, unsigned char *);
 375
 376#define NAME_ERROR_MSG \
 377	"name '%.200s' is not defined"
 378#define GLOBAL_NAME_ERROR_MSG \
 379	"global name '%.200s' is not defined"
 380#define UNBOUNDLOCAL_ERROR_MSG \
 381	"local variable '%.200s' referenced before assignment"
 382#define UNBOUNDFREE_ERROR_MSG \
 383	"free variable '%.200s' referenced before assignment" \
 384        " in enclosing scope"
 385
 386/* Dynamic execution profile */
 387#ifdef DYNAMIC_EXECUTION_PROFILE
 388#ifdef DXPAIRS
 389static long dxpairs[257][256];
 390#define dxp dxpairs[256]
 391#else
 392static long dxp[256];
 393#endif
 394#endif
 395
 396/* Function call profile */
 397#ifdef CALL_PROFILE
 398#define PCALL_NUM 11
 399static int pcall[PCALL_NUM];
 400
 401#define PCALL_ALL 0
 402#define PCALL_FUNCTION 1
 403#define PCALL_FAST_FUNCTION 2
 404#define PCALL_FASTER_FUNCTION 3
 405#define PCALL_METHOD 4
 406#define PCALL_BOUND_METHOD 5
 407#define PCALL_CFUNCTION 6
 408#define PCALL_TYPE 7
 409#define PCALL_GENERATOR 8
 410#define PCALL_OTHER 9
 411#define PCALL_POP 10
 412
 413/* Notes about the statistics
 414
 415   PCALL_FAST stats
 416
 417   FAST_FUNCTION means no argument tuple needs to be created.
 418   FASTER_FUNCTION means that the fast-path frame setup code is used.
 419
 420   If there is a method call where the call can be optimized by changing
 421   the argument tuple and calling the function directly, it gets recorded
 422   twice.
 423
 424   As a result, the relationship among the statistics appears to be
 425   PCALL_ALL == PCALL_FUNCTION + PCALL_METHOD - PCALL_BOUND_METHOD +
 426                PCALL_CFUNCTION + PCALL_TYPE + PCALL_GENERATOR + PCALL_OTHER
 427   PCALL_FUNCTION > PCALL_FAST_FUNCTION > PCALL_FASTER_FUNCTION
 428   PCALL_METHOD > PCALL_BOUND_METHOD
 429*/
 430
 431#define PCALL(POS) pcall[POS]++
 432
 433PyObject *
 434PyEval_GetCallStats(PyObject *self)
 435{
  436	return Py_BuildValue("iiiiiiiiiii",
 437			     pcall[0], pcall[1], pcall[2], pcall[3],
 438			     pcall[4], pcall[5], pcall[6], pcall[7],
 439			     pcall[8], pcall[9], pcall[10]);
 440}
 441#else
 442#define PCALL(O)
 443
 444PyObject *
 445PyEval_GetCallStats(PyObject *self)
 446{
 447	Py_INCREF(Py_None);
 448	return Py_None;
 449}
 450#endif
 451
 452
 453#ifdef WITH_THREAD
 454
 455#ifdef HAVE_ERRNO_H
 456#include <errno.h>
 457#endif
 458#include "pythread.h"
 459
 460static PyThread_type_lock interpreter_lock = 0; /* This is the GIL */
 461long _PyEval_main_thread = 0;
 462
 463int
 464PyEval_ThreadsInitialized(void)
 465{
 466	return interpreter_lock != 0;
 467}
 468
 469void
 470PyEval_InitThreads(void)
 471{
 472	if (interpreter_lock)
 473		return;
 474	interpreter_lock = PyThread_allocate_lock();
 475	PyThread_acquire_lock(interpreter_lock, 1);
 476	_PyEval_main_thread = PyThread_get_thread_ident();
 477}
 478
 479void
 480PyEval_AcquireLock(void)
 481{
 482	PyThread_acquire_lock(interpreter_lock, 1);
 483}
 484
 485void
 486PyEval_ReleaseLock(void)
 487{
 488	PyThread_release_lock(interpreter_lock);
 489}
 490
 491void
 492PyEval_AcquireThread(PyThreadState *tstate)
 493{
 494	if (tstate == NULL)
 495		Py_FatalError("PyEval_AcquireThread: NULL new thread state");
 496	/* Check someone has called PyEval_InitThreads() to create the lock */
 497	assert(interpreter_lock);
 498	PyThread_acquire_lock(interpreter_lock, 1);
 499	if (PyThreadState_Swap(tstate) != NULL)
 500		Py_FatalError(
 501			"PyEval_AcquireThread: non-NULL old thread state");
 502}
 503
 504void
 505PyEval_ReleaseThread(PyThreadState *tstate)
 506{
 507	if (tstate == NULL)
 508		Py_FatalError("PyEval_ReleaseThread: NULL thread state");
 509	if (PyThreadState_Swap(NULL) != tstate)
 510		Py_FatalError("PyEval_ReleaseThread: wrong thread state");
 511	PyThread_release_lock(interpreter_lock);
 512}
 513
 514/* This function is called from PyOS_AfterFork to ensure that newly
 515   created child processes don't hold locks referring to threads which
 516   are not running in the child process.  (This could also be done using
 517   pthread_atfork mechanism, at least for the pthreads implementation.) */
 518
 519void
 520PyEval_ReInitThreads(void)
 521{
 522	PyObject *threading, *result;
 523	PyThreadState *tstate;
 524
 525	if (!interpreter_lock)
 526		return;
 527	/*XXX Can't use PyThread_free_lock here because it does too
 528	  much error-checking.  Doing this cleanly would require
 529	  adding a new function to each thread_*.h.  Instead, just
 530	  create a new lock and waste a little bit of memory */
 531	interpreter_lock = PyThread_allocate_lock();
 532	PyThread_acquire_lock(interpreter_lock, 1);
 533	_PyEval_main_thread = PyThread_get_thread_ident();
 534
 535	/* Update the threading module with the new state.
 536	 */
 537	tstate = PyThreadState_GET();
 538	threading = PyMapping_GetItemString(tstate->interp->modules,
 539					    "threading");
 540	if (threading == NULL) {
 541		/* threading not imported */
 542		PyErr_Clear();
 543		return;
 544	}
 545	result = PyObject_CallMethod(threading, "_after_fork", NULL);
 546	if (result == NULL)
 547		PyErr_WriteUnraisable(threading);
 548	else
 549		Py_DECREF(result);
 550	Py_DECREF(threading);
 551}
 552#endif
 553
 554/* Functions save_thread and restore_thread are always defined so
 555   dynamically loaded modules needn't be compiled separately for use
 556   with and without threads: */
 557
 558PyThreadState *
 559PyEval_SaveThread(void)
 560{
 561	PyThreadState *tstate = PyThreadState_Swap(NULL);
 562	if (tstate == NULL)
 563		Py_FatalError("PyEval_SaveThread: NULL tstate");
 564#ifdef WITH_THREAD
 565	if (interpreter_lock)
 566		PyThread_release_lock(interpreter_lock);
 567#endif
 568	return tstate;
 569}
 570
 571void
 572PyEval_RestoreThread(PyThreadState *tstate)
 573{
 574	if (tstate == NULL)
 575		Py_FatalError("PyEval_RestoreThread: NULL tstate");
 576#ifdef WITH_THREAD
 577	if (interpreter_lock) {
 578		int err = errno;
 579		PyThread_acquire_lock(interpreter_lock, 1);
 580		errno = err;
 581	}
 582#endif
 583	PyThreadState_Swap(tstate);
 584}
 585
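/* Illustrative sketch, not part of this file: how extension code typically
   releases the GIL around blocking work.  The standard
   Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS macros expand to the two
   functions above.  (`example_read` is a hypothetical function.) */
#if 0
static PyObject *
example_read(PyObject *self, PyObject *args)
{
	char buf[4096];
	Py_ssize_t n;

	Py_BEGIN_ALLOW_THREADS		/* tstate = PyEval_SaveThread(); */
	n = read(0, buf, sizeof(buf));	/* GIL released: other threads run */
	Py_END_ALLOW_THREADS		/* PyEval_RestoreThread(tstate); */

	return PyInt_FromSsize_t(n);
}
#endif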
 586
 587/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
 588   signal handlers or Mac I/O completion routines) can schedule calls
 589   to a function to be called synchronously.
 590   The synchronous function is called with one void* argument.
 591   It should return 0 for success or -1 for failure -- failure should
 592   be accompanied by an exception.
 593
 594   If registry succeeds, the registry function returns 0; if it fails
 595   (e.g. due to too many pending calls) it returns -1 (without setting
 596   an exception condition).
 597
 598   Note that because registry may occur from within signal handlers,
 599   or other asynchronous events, calling malloc() is unsafe!
 600
 601#ifdef WITH_THREAD
 602   Any thread can schedule pending calls, but only the main thread
 603   will execute them.
 604#endif
 605
 606   XXX WARNING!  ASYNCHRONOUSLY EXECUTING CODE!
 607   There are two possible race conditions:
 608   (1) nested asynchronous registry calls;
 609   (2) registry calls made while pending calls are being processed.
 610   While (1) is very unlikely, (2) is a real possibility.
 611   The current code is safe against (2), but not against (1).
 612   The safety against (2) is derived from the fact that only one
 613   thread (the main thread) ever takes things out of the queue.
 614
 615   XXX Darn!  With the advent of thread state, we should have an array
 616   of pending calls per thread in the thread state!  Later...
 617*/
 618
 619#define NPENDINGCALLS 32
 620static struct {
 621	int (*func)(void *);
 622	void *arg;
 623} pendingcalls[NPENDINGCALLS];
 624static volatile int pendingfirst = 0;
 625static volatile int pendinglast = 0;
 626static volatile int things_to_do = 0;
 627
 628int
 629Py_AddPendingCall(int (*func)(void *), void *arg)
 630{
 631	static volatile int busy = 0;
 632	int i, j;
 633	/* XXX Begin critical section */
 634	/* XXX If you want this to be safe against nested
 635	   XXX asynchronous calls, you'll have to work harder! */
 636	if (busy)
 637		return -1;
 638	busy = 1;
 639	i = pendinglast;
 640	j = (i + 1) % NPENDINGCALLS;
 641	if (j == pendingfirst) {
 642		busy = 0;
 643		return -1; /* Queue full */
 644	}
 645	pendingcalls[i].func = func;
 646	pendingcalls[i].arg = arg;
 647	pendinglast = j;
 648
 649	_Py_Ticker = 0;
 650	things_to_do = 1; /* Signal main loop */
 651	busy = 0;
 652	/* XXX End critical section */
 653	return 0;
 654}
 655
 656int
 657Py_MakePendingCalls(void)
 658{
 659	static int busy = 0;
 660#ifdef WITH_THREAD
 661	if (_PyEval_main_thread &&
 662	    PyThread_get_thread_ident() != _PyEval_main_thread)
 663		return 0;
 664#endif
 665	if (busy)
 666		return 0;
 667	busy = 1;
 668	things_to_do = 0;
 669	for (;;) {
 670		int i;
 671		int (*func)(void *);
 672		void *arg;
 673		i = pendingfirst;
 674		if (i == pendinglast)
 675			break; /* Queue empty */
 676		func = pendingcalls[i].func;
 677		arg = pendingcalls[i].arg;
 678		pendingfirst = (i + 1) % NPENDINGCALLS;
 679		if (func(arg) < 0) {
 680			busy = 0;
 681			things_to_do = 1; /* We're not done yet */
 682			return -1;
 683		}
 684	}
 685	busy = 0;
 686	return 0;
 687}
 688
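/* Illustrative sketch, not part of this file: the intended use of
   Py_AddPendingCall() from an asynchronous context.  The handler only
   enqueues; the main thread later runs the callback at a safe point via
   Py_MakePendingCalls(), driven by the _Py_Ticker check in the eval loop.
   (`report_signal` and `signal_handler` are hypothetical names.) */
#if 0
static int
report_signal(void *arg)		/* runs later, with the GIL held */
{
	printf("got signal %d\n", (int)(Py_intptr_t)arg);
	return 0;			/* 0 = success; -1 = error (exception set) */
}

static void
signal_handler(int signum)		/* async context: no malloc, no Python API */
{
	Py_AddPendingCall(report_signal, (void *)(Py_intptr_t)signum);
}
#endif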
 689
 690/* The interpreter's recursion limit */
 691
 692#ifndef Py_DEFAULT_RECURSION_LIMIT
 693#define Py_DEFAULT_RECURSION_LIMIT 1000
 694#endif
 695static int recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
 696int _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
 697
 698int
 699Py_GetRecursionLimit(void)
 700{
 701	return recursion_limit;
 702}
 703
 704void
 705Py_SetRecursionLimit(int new_limit)
 706{
 707	recursion_limit = new_limit;
 708	_Py_CheckRecursionLimit = recursion_limit;
 709}
 710
 711/* the macro Py_EnterRecursiveCall() only calls _Py_CheckRecursiveCall()
 712   if the recursion_depth reaches _Py_CheckRecursionLimit.
 713   If USE_STACKCHECK, the macro decrements _Py_CheckRecursionLimit
 714   to guarantee that _Py_CheckRecursiveCall() is regularly called.
 715   Without USE_STACKCHECK, there is no need for this. */
 716int
 717_Py_CheckRecursiveCall(char *where)
 718{
 719	PyThreadState *tstate = PyThreadState_GET();
 720
 721#ifdef USE_STACKCHECK
 722	if (PyOS_CheckStack()) {
 723		--tstate->recursion_depth;
 724		PyErr_SetString(PyExc_MemoryError, "Stack overflow");
 725		return -1;
 726	}
 727#endif
 728	if (tstate->recursion_depth > recursion_limit) {
 729		--tstate->recursion_depth;
 730		PyErr_Format(PyExc_RuntimeError,
 731			     "maximum recursion depth exceeded%s",
 732			     where);
 733		return -1;
 734	}
 735	_Py_CheckRecursionLimit = recursion_limit;
 736	return 0;
 737}
 738
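/* Illustrative sketch, not part of this file: C code that can recurse into
   itself brackets the recursion with Py_EnterRecursiveCall() /
   Py_LeaveRecursiveCall() so the limit above is enforced and the "where"
   suffix lands in the RuntimeError message.  (`example_visit` and
   `do_children` are hypothetical.) */
#if 0
static int
example_visit(PyObject *obj)
{
	int r;
	if (Py_EnterRecursiveCall(" in example_visit"))
		return -1;		/* recursion error already set */
	r = do_children(obj);		/* may call example_visit() again */
	Py_LeaveRecursiveCall();
	return r;
}
#endif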
 739#ifdef __cplusplus
 740extern "C" void
 741#else
 742extern void
 743#endif
 744_PyEval_RaiseForUnboundLocal(PyFrameObject *frame, int var_index)
 745{
 746	format_exc_check_arg(
 747		PyExc_UnboundLocalError,
 748		UNBOUNDLOCAL_ERROR_MSG,
 749		PyTuple_GetItem(frame->f_code->co_varnames, var_index));
 750}
 751
 752/* Records whether tracing is on for any thread.  Counts the number of
 753   threads for which tstate->c_tracefunc is non-NULL, so if the value
 754   is 0, we know we don't have to check this thread's c_tracefunc.
 755   This speeds up the if statement in PyEval_EvalFrameEx() after
   756   fast_next_opcode. */
 757int _Py_TracingPossible = 0;
 758
 759/* for manipulating the thread switch and periodic "stuff" - used to be
 760   per thread, now just a pair o' globals */
 761int _Py_CheckInterval = 100;
 762volatile int _Py_Ticker = 100;
 763
 764#ifdef WITH_LLVM
 765int _Py_BailError = 0;
 766#endif
 767
 768PyObject *
 769PyEval_EvalCode(PyCodeObject *co, PyObject *globals, PyObject *locals)
 770{
 771	return PyEval_EvalCodeEx(co,
 772			  globals, locals,
 773			  (PyObject **)NULL, 0,
 774			  (PyObject **)NULL, 0,
 775			  (PyObject **)NULL, 0,
 776			  NULL);
 777}
 778
 779
 780/* Interpreter main loop */
 781
 782PyObject *
 783PyEval_EvalFrameEx(PyFrameObject *f, int throwflag) {
 784	/* This is for backward compatibility with extension modules that
 785           used this API; core interpreter code should call
 786           PyEval_EvalFrame() */
 787	PyObject *result;
 788	f->f_throwflag = throwflag;
 789	result = PyEval_EvalFrame(f);
 790	f->f_throwflag = 0;
 791	return result;
 792}
 793
 794PyObject *
 795PyEval_EvalFrame(PyFrameObject *f)
 796{
 797#ifdef DXPAIRS
 798	int lastopcode = 0;
 799#endif
 800	register PyObject **stack_pointer;  /* Next free slot in value stack */
 801	register unsigned char *next_instr;
 802	register int opcode;	/* Current opcode */
 803	register int oparg;	/* Current opcode argument, if any */
 804	register enum _PyUnwindReason why; /* Reason for block stack unwind */
 805	register int err;	/* Error status -- nonzero if error */
 806	register PyObject *x;	/* Temporary objects popped off stack */
 807	register PyObject *v;
 808	register PyObject *w;
 809	register PyObject *u;
 810	register PyObject *t;
 811	register PyObject **fastlocals, **freevars;
 812	_PyFrameBailReason bail_reason;
 813	PyObject *retval = NULL;	/* Return value */
 814	PyThreadState *tstate = PyThreadState_GET();
 815	PyCodeObject *co;
 816#ifdef WITH_LLVM
 817	/* We only collect feedback if it will be useful. */
 818	int rec_feedback = (Py_JitControl == PY_JIT_WHENHOT);
 819#endif
 820
 821	/* when tracing we set things up so that
 822
 823               not (instr_lb <= current_bytecode_offset < instr_ub)
 824
 825	   is true when the line being executed has changed.  The
 826           initial values are such as to make this false the first
 827           time it is tested. */
 828	int instr_ub = -1, instr_lb = 0, instr_prev = -1;
 829
 830	unsigned char *first_instr;
 831	PyObject *names;
 832	PyObject *consts;
 833#if defined(Py_DEBUG) || defined(LLTRACE)
 834	/* Make it easier to find out where we are with a debugger */
 835	char *filename;
 836#endif
 837
 838/* Computed GOTOs, or
 839       the-optimization-commonly-but-improperly-known-as-"threaded code"
 840   using gcc's labels-as-values extension
 841   (http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html).
 842
 843   The traditional bytecode evaluation loop uses a "switch" statement, which
 844   decent compilers will optimize as a single indirect branch instruction
 845   combined with a lookup table of jump addresses. However, since the
 846   indirect jump instruction is shared by all opcodes, the CPU will have a
 847   hard time making the right prediction for where to jump next (actually,
  848   it will always be wrong except in the uncommon case of a sequence of
 849   several identical opcodes).
 850
 851   "Threaded code" in contrast, uses an explicit jump table and an explicit
 852   indirect jump instruction at the end of each opcode. Since the jump
 853   instruction is at a different address for each opcode, the CPU will make a
 854   separate prediction for each of these instructions, which is equivalent to
 855   predicting the second opcode of each opcode pair. These predictions have
 856   a much better chance to turn out valid, especially in small bytecode loops.
 857
 858   A mispredicted branch on a modern CPU flushes the whole pipeline and
 859   can cost several CPU cycles (depending on the pipeline depth),
 860   and potentially many more instructions (depending on the pipeline width).
 861   A correctly predicted branch, however, is nearly free.
 862
 863   At the time of this writing, the "threaded code" version is up to 15-20%
 864   faster than the normal "switch" version, depending on the compiler and the
 865   CPU architecture.
 866
 867   We disable the optimization if DYNAMIC_EXECUTION_PROFILE is defined,
 868   because it would render the measurements invalid.
 869
 870
 871   NOTE: care must be taken that the compiler doesn't try to "optimize" the
 872   indirect jumps by sharing them between all opcodes. Such optimizations
 873   can be disabled on gcc by using the -fno-gcse flag (or possibly
 874   -fno-crossjumping).
 875*/
 876
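/* Illustrative toy, not part of this file: the labels-as-values dispatch
   described above, reduced to a three-opcode machine.  Each opcode body
   ends in its own indirect jump, so the CPU gets one branch-prediction
   slot per opcode instead of a single shared one for the switch. */
#if 0
enum { OP_HALT, OP_INC, OP_DEC };

static int
toy_eval(const unsigned char *pc)
{
	static void *targets[] = { &&op_halt, &&op_inc, &&op_dec };
	int acc = 0;
#define TOY_DISPATCH() goto *targets[*pc++]
	TOY_DISPATCH();
op_inc:	acc++; TOY_DISPATCH();
op_dec:	acc--; TOY_DISPATCH();
op_halt:
#undef TOY_DISPATCH
	return acc;
}
#endif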
 877#if defined(USE_COMPUTED_GOTOS) && defined(DYNAMIC_EXECUTION_PROFILE)
 878#undef USE_COMPUTED_GOTOS
 879#endif
 880
 881#ifdef USE_COMPUTED_GOTOS
 882/* Import the static jump table */
 883#include "opcode_targets.h"
 884
 885/* This macro is used when several opcodes defer to the same implementation
 886   (e.g. SETUP_LOOP, SETUP_FINALLY) */
 887#define TARGET_WITH_IMPL(op, impl) \
 888	TARGET_##op: \
 889		opcode = op; \
 890		if (HAS_ARG(op)) \
 891			oparg = NEXTARG(); \
 892	case op: \
 893		goto impl; \
 894
 895#define TARGET(op) \
 896	TARGET_##op: \
 897		opcode = op; \
 898		if (HAS_ARG(op)) \
 899			oparg = NEXTARG(); \
 900	case op:
 901
 902
 903#define DISPATCH() \
 904	{ \
 905		/* Avoid multiple loads from _Py_Ticker despite `volatile` */ \
 906		int _tick = _Py_Ticker - 1; \
 907		_Py_Ticker = _tick; \
 908		if (_tick >= 0) { \
 909			FAST_DISPATCH(); \
 910		} \
 911		continue; \
 912	}
 913
 914#ifdef LLTRACE
 915#define FAST_DISPATCH() \
 916	{ \
 917		if (!lltrace && !_Py_TracingPossible) { \
 918			f->f_lasti = INSTR_OFFSET(); \
 919			goto *opcode_targets[*next_instr++]; \
 920		} \
 921		goto fast_next_opcode; \
 922	}
 923#else
 924#define FAST_DISPATCH() \
 925	{ \
 926		if (!_Py_TracingPossible) { \
 927			f->f_lasti = INSTR_OFFSET(); \
 928			goto *opcode_targets[*next_instr++]; \
 929		} \
 930		goto fast_next_opcode; \
 931	}
 932#endif
 933
 934#else
 935#define TARGET(op) \
 936	case op:
 937#define TARGET_WITH_IMPL(op, impl) \
 938	/* silence compiler warnings about `impl` unused */ \
 939	if (0) goto impl; \
 940	case op:
 941#define DISPATCH() continue
 942#define FAST_DISPATCH() goto fast_next_opcode
 943#endif
 944
 945
 946/* Tuple access macros */
 947
 948#ifndef Py_DEBUG
 949#define GETITEM(v, i) PyTuple_GET_ITEM((PyTupleObject *)(v), (i))
 950#else
 951#define GETITEM(v, i) PyTuple_GetItem((v), (i))
 952#endif
 953
 954/* Code access macros */
 955
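/* The bytecode stream is an opcode byte optionally followed by a 16-bit
   little-endian argument: NEXTARG() consumes that argument, while PEEKARG()
   reads it without advancing (used by the PREDICTED_WITH_ARG machinery
   below). */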
 956#define INSTR_OFFSET()	((int)(next_instr - first_instr))
 957#define NEXTOP()	(*next_instr++)
 958#define NEXTARG()	(next_instr += 2, (next_instr[-1]<<8) + next_instr[-2])
 959#define PEEKARG()	((next_instr[2]<<8) + next_instr[1])
 960#define JUMPTO(x)	(next_instr = first_instr + (x))
 961#define JUMPBY(x)	(next_instr += (x))
 962
 963/* Feedback-gathering macros */
 964#ifdef WITH_LLVM
 965#define RECORD_TYPE(arg_index, obj) \
 966	if(rec_feedback){record_type(co, opcode, f->f_lasti, arg_index, obj);}
 967#define RECORD_OBJECT(arg_index, obj) \
 968	if(rec_feedback){record_object(co, opcode, f->f_lasti, arg_index, obj);}
 969#define RECORD_FUNC(obj) \
 970	if(rec_feedback){record_func(co, opcode, f->f_lasti, 0, obj);}
 971#define INC_COUNTER(arg_index, counter_id) \
 972	if (rec_feedback) { \
 973		inc_feedback_counter(co, opcode, f->f_lasti, arg_index, \
 974		                     counter_id); \
 975	}
 976#define RECORD_TRUE() \
 977	INC_COUNTER(0, PY_FDO_JUMP_TRUE)
 978#define RECORD_FALSE() \
 979	INC_COUNTER(0, PY_FDO_JUMP_FALSE)
 980#define RECORD_NONBOOLEAN() \
 981	INC_COUNTER(0, PY_FDO_JUMP_NON_BOOLEAN)
 982#define UPDATE_HOTNESS_JABS() \
 983	do { if (oparg <= f->f_lasti) ++co->co_hotness; } while (0)
 984#else
 985#define RECORD_TYPE(arg_index, obj)
 986#define RECORD_OBJECT(arg_index, obj)
 987#define RECORD_FUNC(obj)
 988#define INC_COUNTER(arg_index, counter_id)
 989#define RECORD_TRUE()
 990#define RECORD_FALSE()
 991#define RECORD_NONBOOLEAN()
 992#define UPDATE_HOTNESS_JABS()
 993#endif  /* WITH_LLVM */
 994
 995
 996/* OpCode prediction macros
 997	Some opcodes tend to come in pairs thus making it possible to
 998	predict the second code when the first is run.  For example,
 999	GET_ITER is often followed by FOR_ITER. And FOR_ITER is often
1000	followed by STORE_FAST or UNPACK_SEQUENCE.
1001
1002	Verifying the prediction costs a single high-speed test of a register
1003	variable against a constant.  If the pairing was good, then the
1004	processor's own internal branch prediction has a high likelihood of
1005	success, resulting in a nearly zero-overhead transition to the
1006	next opcode.  A successful prediction saves a trip through the eval-loop
1007	including its two unpredictable branches, the HAS_ARG test and the
1008	switch-case.  Combined with the processor's internal branch prediction,
1009	a successful PREDICT has the effect of making the two opcodes run as if
1010	they were a single new opcode with the bodies combined.
1011
1012    If collecting opcode statistics, your choices are to either keep the
1013	predictions turned on and interpret the results as if some opcodes
1014	had been combined, or turn off predictions so that the opcode frequency
1015	counter updates for both opcodes.
1016
1017    Opcode prediction is disabled with threaded code, since the latter allows
1018	the CPU to record separate branch prediction information for each
1019	opcode.
1020
1021*/
1022
1023#if defined(DYNAMIC_EXECUTION_PROFILE) || defined(USE_COMPUTED_GOTOS)
1024#define PREDICT(op)		if (0) goto PRED_##op
1025#define PREDICTED(op)		PRED_##op:
1026#define PREDICTED_WITH_ARG(op)	PRED_##op:
1027#else
1028#define PREDICT(op)		if (*next_instr == op) goto PRED_##op
1029#ifdef WITH_LLVM
1030#define PREDICTED_COMMON(op)	f->f_lasti = INSTR_OFFSET(); opcode = op;
1031#else
1032#define PREDICTED_COMMON(op)	/* nothing */
1033#endif
1034#define PREDICTED(op)		PRED_##op: PREDICTED_COMMON(op) next_instr++
1035#define PREDICTED_WITH_ARG(op)	PRED_##op: PREDICTED_COMMON(op) \
1036				oparg = PEEKARG(); next_instr += 3
1037#endif
1038
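/* Illustrative pairing, not a complete opcode body: a producer opcode ends
   with PREDICT(STORE_FAST); when the next opcode really is STORE_FAST,
   control lands directly on the PREDICTED_WITH_ARG(STORE_FAST) label,
   skipping the ticker check and the main switch entirely. */
#if 0
		/* ...end of an iteration-producing opcode body... */
		PREDICT(STORE_FAST);
		DISPATCH();

		PREDICTED_WITH_ARG(STORE_FAST);
		TARGET(STORE_FAST)
			/* ...body uses oparg filled in by PEEKARG()... */
#endif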
1039
1040/* Stack manipulation macros */
1041
1042/* The stack can grow at most MAXINT deep, as co_nlocals and
1043   co_stacksize are ints. */
1044#define STACK_LEVEL()	((int)(stack_pointer - f->f_valuestack))
1045#define EMPTY()		(STACK_LEVEL() == 0)
1046#define TOP()		(stack_pointer[-1])
1047#define SECOND()	(stack_pointer[-2])
1048#define THIRD() 	(stack_pointer[-3])
1049#define FOURTH()	(stack_pointer[-4])
1050#define SET_TOP(v)	(stack_pointer[-1] = (v))
1051#define SET_SECOND(v)	(stack_pointer[-2] = (v))
1052#define SET_THIRD(v)	(stack_pointer[-3] = (v))
1053#define SET_FOURTH(v)	(stack_pointer[-4] = (v))
1054#define BASIC_STACKADJ(n)	(stack_pointer += n)
1055#define BASIC_PUSH(v)	(*stack_pointer++ = (v))
1056#define BASIC_POP()	(*--stack_pointer)
1057
1058#ifdef LLTRACE
1059#define PUSH(v)		{ (void)(BASIC_PUSH(v), \
1060                               lltrace && prtrace(TOP(), "push")); \
1061                               assert(STACK_LEVEL() <= co->co_stacksize); }
1062#define POP()		((void)(lltrace && prtrace(TOP(), "pop")), \
1063			 BASIC_POP())
1064#define STACKADJ(n)	{ (void)(BASIC_STACKADJ(n), \
1065                               lltrace && prtrace(TOP(), "stackadj")); \
1066                               assert(STACK_LEVEL() <= co->co_stacksize); }
1067#define EXT_POP(STACK_POINTER) ((void)(lltrace && \
1068				prtrace((STACK_POINTER)[-1], "ext_pop")), \
1069				*--(STACK_POINTER))
1070#define EXT_PUSH(v, STACK_POINTER) ((void)(*(STACK_POINTER)++ = (v), \
1071                   lltrace && prtrace((STACK_POINTER)[-1], "ext_push")))
1072#else
1073#define PUSH(v)		BASIC_PUSH(v)
1074#define POP()		BASIC_POP()
1075#define STACKADJ(n)	BASIC_STACKADJ(n)
1076#define EXT_POP(STACK_POINTER) (*--(STACK_POINTER))
1077#define EXT_PUSH(v, STACK_POINTER) (*(STACK_POINTER)++ = (v))
1078#endif
1079
1080/* Local variable macros */
1081
1082#define GETLOCAL(i)	(fastlocals[i])
1083
1084/* The SETLOCAL() macro must not DECREF the local variable in-place and
1085   then store the new value; it must copy the old value to a temporary
1086   value, then store the new value, and then DECREF the temporary value.
1087   This is because it is possible that during the DECREF the frame is
1088   accessed by other code (e.g. a __del__ method or gc.collect()) and the
1089   variable would be pointing to already-freed memory. */
1090#define SETLOCAL(i, value)	do { PyObject *tmp = GETLOCAL(i); \
1091				     GETLOCAL(i) = value; \
1092                                     Py_XDECREF(tmp); } while (0)
1093
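/* For contrast, the unsafe ordering would be
	Py_XDECREF(GETLOCAL(i)); GETLOCAL(i) = value;
   if that DECREF triggers a __del__ or gc.collect() that reads the same
   local, it would observe a dangling pointer before the new value lands. */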
1094/* Start of code */
1095
1096	if (f == NULL)
1097		return NULL;
1098
1099#ifdef WITH_LLVM
1100	bail_reason = (_PyFrameBailReason)f->f_bailed_from_llvm;
1101#else
1102	bail_reason = _PYFRAME_NO_BAIL;
1103#endif  /* WITH_LLVM */
1104	/* push frame */
1105	if (bail_reason == _PYFRAME_NO_BAIL && Py_EnterRecursiveCall(""))
1106		return NULL;
1107
1108	co = f->f_code;
1109	tstate->frame = f;
1110
1111#ifdef WITH_LLVM
1112	maybe_compile(co, f);
1113
1114	if (f->f_use_jit) {
1115		assert(bail_reason == _PYFRAME_NO_BAIL);
1116		assert(co->co_native_function != NULL &&
1117		       "maybe_compile was supposed to ensure"
1118		       " that co_native_function exists");
1119		if (!co->co_use_jit) {
1120			// A frame cannot use_jit if the underlying code object
1121			// can't use_jit. This comes up when a generator is
1122			// invalidated while active.
1123			f->f_use_jit = 0;
1124		}
1125		else {
1126			assert(co->co_fatalbailcount < PY_MAX_FATALBAILCOUNT);
1127			retval = co->co_native_function(f);
1128			goto exit_eval_frame;
1129		}
1130	}
1131
1132	if (bail_reason != _PYFRAME_NO_BAIL) {
1133#ifdef Py_WITH_INSTRUMENTATION
1134		bail_count_stats->RecordBail(f, bail_reason);
1135#endif
1136		if (_Py_BailError) {
1137			/* When we bail, we set f_lasti to the index of the
1138			 * current opcode minus 1, so we add one back.  */
1139			int lasti = f->f_lasti + 1;
1140			PyErr_Format(PyExc_RuntimeError, "bailed to the "
1141				     "interpreter at opcode index %d", lasti);
1142			goto exit_eval_frame;
1143		}
1144	}
1145
1146	/* Create co_runtime_feedback now that we're about to use it.  You
1147	 * might think this would cause a problem if the user flips
1148	 * Py_JitControl from "never" to "whenhot", but since the value of
1149	 * rec_feedback is constant for the duration of this frame's execution,
1150	 * we will not accidentally try to record feedback without initializing
1151	 * co_runtime_feedback.  */
1152	if (rec_feedback && co->co_runtime_feedback == NULL) {
1153#ifdef Py_WITH_INSTRUMENTATION
1154		feedback_map_counter->IncCounter();
1155#endif
1156		co->co_runtime_feedback = PyFeedbackMap_New();
1157	}
1158#endif  /* WITH_LLVM */
1159
1160	switch (bail_reason) {
1161		case _PYFRAME_NO_BAIL:
1162		case _PYFRAME_TRACE_ON_ENTRY:
1163			if (tstate->use_tracing) {
1164				if (_PyEval_TraceEnterFunction(tstate, f))
1165					/* Trace or profile function raised
1166					   an error. */
1167					goto exit_eval_frame;
1168			}
1169			break;
1170
1171		case _PYFRAME_BACKEDGE_TRACE:
1172			/* If we bailed because of a backedge, set instr_prev
1173			   to ensure a line trace call. */
1174			instr_prev = INT_MAX;
1175			break;
1176
1177		case _PYFRAME_CALL_PROFILE:
1178		case _PYFRAME_LINE_TRACE:
1179		case _PYFRAME_FATAL_GUARD_FAIL:
1180		case _PYFRAME_GUARD_FAIL:
1181			/* These are handled by the opcode dispatch loop. */
1182			break;
1183
1184		default:
1185			PyErr_Format(PyExc_SystemError, "unknown bail reason");
1186			goto exit_eval_frame;
1187	}
1188
1189
1190	names = co->co_names;
1191	consts = co->co_consts;
1192	fastlocals = f->f_localsplus;
1193	freevars = f->f_localsplus + co->co_nlocals;
1194	first_instr = (unsigned char*) PyString_AS_STRING(co->co_code);
1195	/* An explanation is in order for the next line.
1196
1197	   f->f_lasti now refers to the index of the last instruction
1198	   executed.  You might think this was obvious from the name, but
1199	   this wasn't always true before 2.3!  PyFrame_New now sets
1200	   f->f_lasti to -1 (i.e. the index *before* the first instruction)
1201	   and YIELD_VALUE doesn't fiddle with f_lasti any more.  So this
1202	   does work.  Promise.
1203
1204	   When the PREDICT() macros are enabled, some opcode pairs follow in
1205           direct succession without updating f->f_lasti.  A successful
1206           prediction effectively links the two codes together as if they
1207           were a single new opcode; accordingly, f->f_lasti will point to
1208           the first code in the pair (for instance, GET_ITER followed by
1209           FOR_ITER is effectively a single opcode, and f->f_lasti will point
1210           to the beginning of the combined pair.)
1211	*/
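	/* E.g. on first entry PyFrame_New has set f_lasti to -1, so
	   next_instr starts exactly at first_instr. */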
1212	next_instr = first_instr + f->f_lasti + 1;
1213	stack_pointer = f->f_stacktop;
1214	assert(stack_pointer != NULL);
1215	f->f_stacktop = NULL;	/* remains NULL unless yield suspends frame */
1216
1217#ifdef LLTRACE
1218	lltrace = PyDict_GetItemString(f->f_globals, "__lltrace__") != NULL;
1219#endif
1220#if defined(Py_DEBUG) || defined(LLTRACE)
1221	filename = PyString_AsString(co->co_filename);
1222#endif
1223
1224	why = UNWIND_NOUNWIND;
1225	w = NULL;
1226
1227	/* Note that this goes after the LLVM handling code so we don't log
1228	 * this event when calling LLVM functions. Do this before the throwflag
1229	 * check below to avoid mismatched enter/exit events in the log. */
1230	PY_LOG_TSC_EVENT(CALL_ENTER_EVAL);
1231
1232	if (f->f_throwflag) { /* support for generator.throw() */
1233		why = UNWIND_EXCEPTION;
1234		goto on_error;
1235	}
1236
1237	for (;;) {
1238		assert(stack_pointer >= f->f_valuestack); /* else underflow */
1239		assert(STACK_LEVEL() <= co->co_stacksize);  /* else overflow */
1240
1241		/* Do periodic things.  Doing this every time through
1242		   the loop would add too much overhead, so we do it
1243		   only every Nth instruction.  We also do it if
1244		   ``things_to_do'' is set, i.e. when an asynchronous
1245		   event needs attention (e.g. a signal handler or
1246		   async I/O handler); see Py_AddPendingCall() and
1247		   Py_MakePendingCalls() above. */
1248
1249		if (--_Py_Ticker < 0) {
1250			if (*next_instr == SETUP_FINALLY) {
1251				/* Make the last opcode before
1252				   a try: finally: block uninterruptible. */
1253				goto fast_next_opcode;
1254			}
1255			if (_PyEval_HandlePyTickerExpired(tstate) == -1) {
1256				why = UNWIND_EXCEPTION;
1257				goto on_error;
1258			}
1259		}
1260
1261	fast_next_opcode:
1262		f->f_lasti = INSTR_OFFSET();
1263
1264		/* line-by-line tracing support */
1265
1266		if (_Py_TracingPossible &&
1267		    tstate->c_tracefunc != NULL && !tstate->tracing) {
1268			/* see maybe_call_line_trace
1269			   for expository comments */
1270			f->f_stacktop = stack_pointer;
1271
1272			err = maybe_call_line_trace(tstate->c_tracefunc,
1273						    tstate->c_traceobj,
1274						    f, &instr_lb, &instr_ub,
1275						    &instr_prev);
1276			/* Reload possibly changed frame fields */
1277			JUMPTO(f->f_lasti);
1278			assert(f->f_stacktop != NULL);
1279			stack_pointer = f->f_stacktop;
1280			f->f_stacktop = NULL;
1281			if (err) {
1282				/* trace function raised an exception */
1283				why = UNWIND_EXCEPTION;
1284				goto on_error;
1285			}
1286		}
1287
1288		/* Extract opcode and argument */
1289
1290		opcode = NEXTOP();
1291		oparg = 0;   /* allows oparg to be stored in a register because
1292			it doesn't have to be remembered across a full loop */
1293		if (HAS_ARG(opcode))
1294			oparg = NEXTARG();
1295	  dispatch_opcode:
1296#ifdef DYNAMIC_EXECUTION_PROFILE
1297#ifdef DXPAIRS
1298		dxpairs[lastopcode][opcode]++;
1299		lastopcode = opcode;
1300#endif
1301		dxp[opcode]++;
1302#endif
1303
1304#ifdef LLTRACE
1305		/* Instruction tracing */
1306
1307		if (lltrace) {
1308			if (HAS_ARG(opcode)) {
1309				printf("%d: %d, %d\n",
1310				       f->f_lasti, opcode, oparg);
1311			}
1312			else {
1313				printf("%d: %d\n",
1314				       f->f_lasti, opcode);
1315			}
1316		}
1317#endif
1318
1319		/* Main switch on opcode */
1320
1321		assert(why == UNWIND_NOUNWIND);
1322		/* XXX(jyasskin): Add an assertion under CHECKEXC that
1323		   !PyErr_Occurred(). */
1324		switch (opcode) {
1325
1326		/* BEWARE!
1327		   It is essential that any operation that fails sets
1328		   why to anything but UNWIND_NOUNWIND, and that no operation
1329		   that succeeds does this! */
1330
1331		/* case STOP_CODE: this is an error! */
1332
1333		TARGET(NOP)
1334			FAST_DISPATCH();
1335
1336		TARGET(LOAD_FAST)
1337			x = GETLOCAL(oparg);
1338			if (x != NULL) {
1339				Py_INCREF(x);
1340				PUSH(x);
1341				FAST_DISPATCH();
1342			}
1343			_PyEval_RaiseForUnboundLocal(f, oparg);
1344			why = UNWIND_EXCEPTION;
1345			break;
1346
1347		TARGET(LOAD_CONST)
1348			x = GETITEM(consts, oparg);
1349			Py_INCREF(x);
1350			PUSH(x);
1351			FAST_DISPATCH();
1352
1353		PREDICTED_WITH_ARG(STORE_FAST);
1354		TARGET(STORE_FAST)
1355			v = POP();
1356			SETLOCAL(oparg, v);
1357			FAST_DISPATCH();
1358
1359		TARGET(POP_TOP)
1360			v = POP();
1361			Py_DECREF(v);
1362			FAST_DISPATCH();
1363
1364		TARGET(ROT_TWO)
1365			v = TOP();
1366			w = SECOND();
1367			SET_TOP(w);
1368			SET_SECOND(v);
1369			FAST_DISPATCH();
1370
1371		TARGET(ROT_THREE)
1372			v = TOP();
1373			w = SECOND();
1374			x = THIRD();
1375			SET_TOP(w);
1376			SET_SECOND(x);
1377			SET_THIRD(v);
1378			FAST_DISPATCH();
1379
1380		TARGET(ROT_FOUR)
1381			u = TOP();
1382			v = SECOND();
1383			w = THIRD();
1384			x = FOURTH();
1385			SET_TOP(v);
1386			SET_SECOND(w);
1387			SET_THIRD(x);
1388			SET_FOURTH(u);
1389			FAST_DISPATCH();
1390
1391		TARGET(DUP_TOP)
1392			v = TOP();
1393			Py_INCREF(v);
1394			PUSH(v);
1395			FAST_DISPATCH();
1396
1397		TARGET(DUP_TOP_TWO)
1398			x = TOP();
1399			Py_INCREF(x);
1400			w = SECOND();
1401			Py_INCREF(w);
1402			STACKADJ(2);
1403			SET_TOP(x);
1404			SET_SECOND(w);
1405			FAST_DISPATCH();
1406
1407		TARGET(DUP_TOP_THREE)
1408			x = TOP();
1409			Py_INCREF(x);
1410			w = SECOND();
1411			Py_INCREF(w);
1412			v = THIRD();
1413			Py_INCREF(v);
1414			STACKADJ(3);
1415			SET_TOP(x);
1416			SET_SECOND(w);
1417			SET_THIRD(v);
1418			FAST_DISPATCH();
1419
1420		TARGET(UNARY_POSITIVE)
1421			v = TOP();
1422			RECORD_TYPE(0, v);
1423			x = PyNumber_Positive(v);
1424			Py_DECREF(v);
1425			SET_TOP(x);
1426			if (x == NULL) {
1427				why = UNWIND_EXCEPTION;
1428				break;
1429			}
1430			DISPATCH();
1431
1432		TARGET(UNARY_NEGATIVE)
1433			v = TOP();
1434			RECORD_TYPE(0, v);
1435			x = PyNumber_Negative(v);
1436			Py_DECREF(v);
1437			SET_TOP(x);
1438			if (x == NULL) {
1439				why = UNWIND_EXCEPTION;
1440				break;
1441			}
1442			DISPATCH();
1443
1444		TARGET(UNARY_NOT)
1445			v = TOP();
1446			RECORD_TYPE(0, v);
1447			err = PyObject_IsTrue(v);
1448			Py_DECREF(v);
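			/* PyObject_IsTrue(): < 0 on error, 0 if false,
			   > 0 if true; NOT maps false to Py_True and
			   true to Py_False. */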
1449			if (err == 0) {
1450				Py_INCREF(Py_True);
1451				SET_TOP(Py_True);
1452				DISPATCH();
1453			}
1454			else if (err > 0) {
1455				Py_INCREF(Py_False);
1456				SET_TOP(Py_False);
1457				DISPATCH();
1458			}
1459			STACKADJ(-1);
1460			why = UNWIND_EXCEPTION;
1461			break;
1462
1463		TARGET(UNARY_CONVERT)
1464			v = TOP();
1465			RECORD_TYPE(0, v);
1466			x = PyObject_Repr(v);
1467			Py_DECREF(v);
1468			SET_TOP(x);
1469			if (x == NULL) {
1470				why = UNWIND_EXCEPTION;
1471				break;
1472			}
1473			DISPATCH();
1474
1475		TARGET(UNARY_INVERT)
1476			v = TOP();
1477			RECORD_TYPE(0, v);
1478			x = PyNumber_Invert(v);
1479			Py_DECREF(v);
1480			SET_TOP(x);
1481			if (x == NULL) {
1482				why = UNWIND_EXCEPTION;
1483				break;
1484			}
1485			DISPATCH();
1486
1487		TARGET(BINARY_POWER)
1488			w = POP();
1489			v = TOP();
1490			RECORD_TYPE(0, v);
1491			RECORD_TYPE(1, w);
1492			x = PyNumber_Power(v, w, Py_None);
1493			Py_DECREF(v);
1494			Py_DECREF(w);
1495			SET_TOP(x);
1496			if (x == NULL) {
1497				why = UNWIND_EXCEPTION;
1498				break;
1499			}
1500			DISPATCH();
1501
1502		TARGET(BINARY_MULTIPLY)
1503			w = POP();
1504			v = TOP();
1505			RECORD_TYPE(0, v);
1506			RECORD_TYPE(1, w);
1507			x = PyNumber_Multiply(v, w);
1508			Py_DECREF(v);
1509			Py_DECREF(w);
1510			SET_TOP(x);
1511			if (x == NULL) {
1512				why = UNWIND_EXCEPTION;
1513				break;
1514			}
1515			DISPATCH();
1516
1517		TARGET(BINARY_DIVIDE)
1518			if (!_Py_QnewFlag) {
1519				w = POP();
1520				v = TOP();
1521				RECORD_TYPE(0, v);
1522				RECORD_TYPE(1, w);
1523				x = PyNumber_Divide(v, w);
1524				Py_DECREF(v);
1525				Py_DECREF(w);
1526				SET_TOP(x);
1527				if (x == NULL) {
1528					why = UNWIND_EXCEPTION;
1529					break;
1530				}
1531				DISPATCH();
1532			}
1533			/* -Qnew is in effect: jump to BINARY_TRUE_DIVIDE */
1534			goto _binary_true_divide;
1535
1536		TARGET(BINARY_TRUE_DIVIDE)
1537		_binary_true_divide:
1538			w = POP();
1539			v = TOP();
1540			RECORD_TYPE(0, v);
1541			RECORD_TYPE(1, w);
1542			x = PyNumber_TrueDivide(v, w);
1543			Py_DECREF(v);
1544			Py_DECREF(w);
1545			SET_TOP(x);
1546			if (x == NULL) {
1547				why = UNWIND_EXCEPTION;
1548				break;
1549			}
1550			DISPATCH();
1551
1552		TARGET(BINARY_FLOOR_DIVIDE)
1553			w = POP();
1554			v = TOP();
1555			RECORD_TYPE(0, v);
1556			RECORD_TYPE(1, w);
1557			x = PyNumber_FloorDivide(v, w);
1558			Py_DECREF(v);
1559			Py_DECREF(w);
1560			SET_TOP(x);
1561			if (x == NULL) {
1562				why = UNWIND_EXCEPTION;
1563				break;
1564			}
1565			DISPATCH();
1566
1567		TARGET(BINARY_MODULO)
1568			w = POP();
1569			v = TOP();
1570			RECORD_TYPE(0, v);
1571			RECORD_TYPE(1, w);
1572			if (PyString_CheckExact(v))
1573				x = PyString_Format(v, w);
1574			else
1575				x = PyNumber_Remainder(v, w);
1576			Py_DECREF(v);
1577			Py_DECREF(w);
1578			SET_TOP(x);
1579			if (x == NULL) {
1580				why = UNWIND_EXCEPTION;
1581				break;
1582			}
1583			DISPATCH();
1584
1585		TARGET(BINARY_ADD)
1586			w = POP();
1587			v = TOP();
1588			RECORD_TYPE(0, v);
1589			RECORD_TYPE(1, w);
1590			if (PyInt_CheckExact(v) && PyInt_CheckExact(w)) {
1591				/* INLINE: int + int */
1592				register long a, b, i;
1593				a = PyInt_AS_LONG(v);
1594				b = PyInt_AS_LONG(w);
1595				i = a + b;
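				/* Overflow check: both XORs are negative
				   iff a and b share a sign that the sum i
				   does not, i.e. the add overflowed; fall
				   back to the generic path.  The same trick
				   guards the other int fast paths below. */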
1596				if ((i^a) < 0 && (i^b) < 0)
1597					goto slow_add;
1598				x = PyInt_FromLong(i);
1599			}
1600			else if (PyString_CheckExact(v) &&
1601				 PyString_CheckExact(w)) {
1602				x = string_concatenate(v, w, f, next_instr);
1603				/* string_concatenate consumed the ref to v */
1604				goto skip_decref_vx;
1605			}
1606			else {
1607			  slow_add:
1608				x = PyNumber_Add(v, w);
1609			}
1610			Py_DECREF(v);
1611		  skip_decref_vx:
1612			Py_DECREF(w);
1613			SET_TOP(x);
1614			if (x == NULL) {
1615				why = UNWIND_EXCEPTION;
1616				break;
1617			}
1618			DISPATCH();
1619
1620		TARGET(BINARY_SUBTRACT)
1621			w = POP();
1622			v = TOP();
1623			RECORD_TYPE(0, v);
1624			RECORD_TYPE(1, w);
1625			if (PyInt_CheckExact(v) && PyInt_CheckExact(w)) {
1626				/* INLINE: int - int */
1627				register long a, b, i;
1628				a = PyInt_AS_LONG(v);
1629				b = PyInt_AS_LONG(w);
1630				i = a - b;
1631				if ((i^a) < 0 && (i^~b) < 0)
1632					goto slow_sub;
1633				x = PyInt_FromLong(i);
1634			}
1635			else {
1636			  slow_sub:
1637				x = PyNumber_Subtract(v, w);
1638			}
1639			Py_DECREF(v);
1640			Py_DECREF(w);
1641			SET_TOP(x);
1642			if (x == NULL) {
1643				why = UNWIND_EXCEPTION;
1644				break;
1645			}
1646			DISPATCH();
1647
1648		TARGET(BINARY_SUBSCR)
1649			w = POP();
1650			v = TOP();
1651			RECORD_TYPE(0, v);
1652			RECORD_TYPE(1, w);
1653			if (PyList_CheckExact(v) && PyInt_CheckExact(w)) {
1654				/* INLINE: list[int] */
1655				Py_ssize_t i = PyInt_AsSsize_t(w);
1656				if (i < 0)
1657					i += PyList_GET_SIZE(v);
1658				if (i >= 0 && i < PyList_GET_SIZE(v)) {
1659					x = PyList_GET_ITEM(v, i);
1660					Py_INCREF(x);
1661				}
1662				else
1663					goto slow_get;
1664			}
1665			else
1666			  slow_get:
1667				x = PyObject_GetItem(v, w);
1668			Py_DECREF(v);
1669			Py_DECREF(w);
1670			SET_TOP(x);
1671			if (x == NULL) {
1672				why = UNWIND_EXCEPTION;
1673				break;
1674			}
1675			DISPATCH();
1676
1677		TARGET(BINARY_LSHIFT)
1678			w = POP();
1679			v = TOP();
1680			RECORD_TYPE(0, v);
1681			RECORD_TYPE(1, w);
1682			x = PyNumber_Lshift(v, w);
1683			Py_DECREF(v);
1684			Py_DECREF(w);
1685			SET_TOP(x);
1686			if (x == NULL) {
1687				why = UNWIND_EXCEPTION;
1688				break;
1689			}
1690			DISPATCH();
1691
1692		TARGET(BINARY_RSHIFT)
1693			w = POP();
1694			v = TOP();
1695			RECORD_TYPE(0, v);
1696			RECORD_TYPE(1, w);
1697			x = PyNumber_Rshift(v, w);
1698			Py_DECREF(v);
1699			Py_DECREF(w);
1700			SET_TOP(x);
1701			if (x == NULL) {
1702				why = UNWIND_EXCEPTION;
1703				break;
1704			}
1705			DISPATCH();
1706
1707		TARGET(BINARY_AND)
1708			w = POP();
1709			v = TOP();
1710			RECORD_TYPE(0, v);
1711			RECORD_TYPE(1, w);
1712			x = PyNumber_And(v, w);
1713			Py_DECREF(v);
1714			Py_DECREF(w);
1715			SET_TOP(x);
1716			if (x == NULL) {
1717				why = UNWIND_EXCEPTION;
1718				break;
1719			}
1720			DISPATCH();
1721
1722		TARGET(BINARY_XOR)
1723			w = POP();
1724			v = TOP();
1725			RECORD_TYPE(0, v);
1726			RECORD_TYPE(1, w);
1727			x = PyNumber_Xor(v, w);
1728			Py_DECREF(v);
1729			Py_DECREF(w);
1730			SET_TOP(x);
1731			if (x == NULL) {
1732				why = UNWIND_EXCEPTION;
1733				break;
1734			}
1735			DISPATCH();
1736
1737		TARGET(BINARY_OR)
1738			w = POP();
1739			v = TOP();
1740			RECORD_TYPE(0, v);
1741			RECORD_TYPE(1, w);
1742			x = PyNumber_Or(v, w);
1743			Py_DECREF(v);
1744			Py_DECREF(w);
1745			SET_TOP(x);
1746			if (x == NULL) {
1747				why = UNWIND_EXCEPTION;
1748				break;
1749			}
1750			DISPATCH();
1751
1752		TARGET(LIST_APPEND)
1753			w = POP();
1754			v = POP();
1755			RECORD_TYPE(0, v);
1756			RECORD_TYPE(1, w);
1757			err = PyList_Append(v, w);
1758			Py_DECREF(v);
1759			Py_DECREF(w);
1760			if (err != 0) {
1761				why = UNWIND_EXCEPTION;
1762				break;
1763			}
1764			PREDICT(JUMP_ABSOLUTE);
1765			DISPATCH();
1766
1767		TARGET(INPLACE_POWER)
1768			w = POP();
1769			v = TOP();
1770			RECORD_TYPE(0, v);
1771			RECORD_TYPE(1, w);
1772			x = PyNumber_InPlacePower(v, w, Py_None);
1773			Py_DECREF(v);
1774			Py_DECREF(w);
1775			SET_TOP(x);
1776			if (x == NULL) {
1777				why = UNWIND_EXCEPTION;
1778				break;
1779			}
1780			DISPATCH();
1781
1782		TARGET(INPLACE_MULTIPLY)
1783			w = POP();
1784			v = TOP();
1785			RECORD_TYPE(0, v);
1786			RECORD_TYPE(1, w);
1787			x = PyNumber_InPlaceMultiply(v, w);
1788			Py_DECREF(v);
1789			Py_DECREF(w);
1790			SET_TOP(x);
1791			if (x == NULL) {
1792				why = UNWIND_EXCEPTION;
1793				break;
1794			}
1795			DISPATCH();
1796
1797		TARGET(INPLACE_DIVIDE)
1798			if (!_Py_QnewFlag) {
1799				w = POP();
1800				v = TOP();
1801				RECORD_TYPE(0, v);
1802				RECORD_TYPE(1, w);
1803				x = PyNumber_InPlaceDivide(v, w);
1804				Py_DECREF(v);
1805				Py_DECREF(w);
1806				SET_TOP(x);
1807				if (x == NULL) {
1808					why = UNWIND_EXCEPTION;
1809					break;
1810				}
1811				DISPATCH();
1812			}
1813			/* -Qnew is in effect: jump to INPLACE_TRUE_DIVIDE */
1814			goto _inplace_true_divide;
1815
1816		TARGET(INPLACE_TRUE_DIVIDE)
1817		_inplace_true_divide:
1818			w = POP();
1819			v = TOP();
1820			RECORD_TYPE(0, v);
1821			RECORD_TYPE(1, w);
1822			x = PyNumber_InPlaceTrueDivide(v, w);
1823			Py_DECREF(v);
1824			Py_DECREF(w);
1825			SET_TOP(x);
1826			if (x == NULL) {
1827				why = UNWIND_EXCEPTION;
1828				break;
1829			}
1830			DISPATCH();
1831
1832		TARGET(INPLACE_FLOOR_DIVIDE)
1833			w = POP();
1834			v = TOP();
1835			RECORD_TYPE(0, v);
1836			RECORD_TYPE(1, w);
1837			x = PyNumber_InPlaceFloorDivide(v, w);
1838			Py_DECREF(v);
1839			Py_DECREF(w);
1840			SET_TOP(x);
1841			if (x == NULL) {
1842				why = UNWIND_EXCEPTION;
1843				break;
1844			}
1845			DISPATCH();
1846
1847		TARGET(INPLACE_MODULO)
1848			w = POP();
1849			v = TOP();
1850			RECORD_TYPE(0, v);
1851			RECORD_TYPE(1, w);
1852			x = PyNumber_InPlaceRemainder(v, w);
1853			Py_DECREF(v);
1854			Py_DECREF(w);
1855			SET_TOP(x);
1856			if (x == NULL) {
1857				why = UNWIND_EXCEPTION;
1858				break;
1859			}
1860			DISPATCH();
1861
1862		TARGET(INPLACE_ADD)
1863			w = POP();
1864			v = TOP();
1865			RECORD_TYPE(0, v);
1866			RECORD_TYPE(1, w);
1867			if (PyInt_CheckExact(v) && PyInt_CheckExact(w)) {
1868				/* INLINE: int + int */
1869				register long a, b, i;
1870				a = PyInt_AS_LONG(v);
1871				b = PyInt_AS_LONG(w);
1872				i = a + b;
1873				if ((i^a) < 0 && (i^b) < 0)
1874					goto slow_iadd;
1875				x = PyInt_FromLong(i);
1876			}
1877			else if (PyString_CheckExact(v) &&
1878				 PyString_CheckExact(w)) {
1879				x = string_concatenate(v, w, f, next_instr);
1880				/* string_concatenate consumed the ref to v */
1881				goto skip_decref_v;
1882			}
1883			else {
1884			  slow_iadd:
1885				x = PyNumber_InPlaceAdd(v, w);
1886			}
1887			Py_DECREF(v);
1888		  skip_decref_v:
1889			Py_DECREF(w);
1890			SET_TOP(x);
1891			if (x == NULL) {
1892				why = UNWIND_EXCEPTION;
1893				break;
1894			}
1895			DISPATCH();
1896
1897		TARGET(INPLACE_SUBTRACT)
1898			w = POP();
1899			v = TOP();
1900			RECORD_TYPE(0, v);
1901			RECORD_TYPE(1, w);
1902			if (PyInt_CheckExact(v) && PyInt_CheckExact(w)) {
1903				/* INLINE: int - int */
1904				register long a, b, i;
1905				a = PyInt_AS_LONG(v);
1906				b = PyInt_AS_LONG(w);
1907				i = a - b;
1908				if ((i^a) < 0 && (i^~b) < 0)
1909					goto slow_isub;
1910				x = PyInt_FromLong(i);
1911			}
1912			else {
1913			  slow_isub:
1914				x = PyNumber_InPlaceSubtract(v, w);
1915			}
1916			Py_DECREF(v);
1917			Py_DECREF(w);
1918			SET_TOP(x);
1919			if (x == NULL) {
1920				why = UNWIND_EXCEPTION;
1921				break;
1922			}
1923			DISPATCH();
1924
1925		TARGET(INPLACE_LSHIFT)
1926			w = POP();
1927			v = TOP();
1928			RECORD_TYPE(0, v);
1929			RECORD_TYPE(1, w);
1930			x = PyNumber_InPlace

[Listing truncated here; the full file is available at the project URL above.]