
/Objects/listobject.c

http://unladen-swallow.googlecode.com/
C | 3027 lines | 2356 code | 278 blank | 393 comment | 593 complexity | 8a9d688b0a1b9fb4a9013be51b62b0d7 MD5

Note: large files are truncated in this view; the listing below ends partway through the file.

   1/* List object implementation */
   2
   3#include "Python.h"
   4
   5#ifdef STDC_HEADERS
   6#include <stddef.h>
   7#else
   8#include <sys/types.h>		/* For size_t */
   9#endif
  10
  11/* Ensure ob_item has room for at least newsize elements, and set
  12 * ob_size to newsize.  If newsize > ob_size on entry, the content
  13 * of the new slots at exit is undefined heap trash; it's the caller's
   14 * responsibility to overwrite them with sane values.
  15 * The number of allocated elements may grow, shrink, or stay the same.
  16 * Failure is impossible if newsize <= self.allocated on entry, although
  17 * that partly relies on an assumption that the system realloc() never
  18 * fails when passed a number of bytes <= the number of bytes last
  19 * allocated (the C standard doesn't guarantee this, but it's hard to
  20 * imagine a realloc implementation where it wouldn't be true).
   21 * Note that self->ob_item may change, even if newsize is less
   22 * than ob_size on entry.
  23 */
  24static int
  25list_resize(PyListObject *self, Py_ssize_t newsize)
  26{
  27	PyObject **items;
  28	size_t new_allocated;
  29	Py_ssize_t allocated = self->allocated;
  30
  31	/* Bypass realloc() when a previous overallocation is large enough
  32	   to accommodate the newsize.  If the newsize falls lower than half
  33	   the allocated size, then proceed with the realloc() to shrink the list.
  34	*/
  35	if (allocated >= newsize && newsize >= (allocated >> 1)) {
  36		assert(self->ob_item != NULL || newsize == 0);
  37		Py_SIZE(self) = newsize;
  38		return 0;
  39	}
  40
  41	/* This over-allocates proportional to the list size, making room
  42	 * for additional growth.  The over-allocation is mild, but is
  43	 * enough to give linear-time amortized behavior over a long
  44	 * sequence of appends() in the presence of a poorly-performing
  45	 * system realloc().
  46	 * The growth pattern is:  0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ...
  47	 */
  48	new_allocated = (newsize >> 3) + (newsize < 9 ? 3 : 6);
  49
  50	/* check for integer overflow */
  51	if (new_allocated > PY_SIZE_MAX - newsize) {
  52		PyErr_NoMemory();
  53		return -1;
  54	} else {
  55		new_allocated += newsize;
  56	}
  57
  58	if (newsize == 0)
  59		new_allocated = 0;
  60	items = self->ob_item;
  61	if (new_allocated <= ((~(size_t)0) / sizeof(PyObject *)))
  62		PyMem_RESIZE(items, PyObject *, new_allocated);
  63	else
  64		items = NULL;
  65	if (items == NULL) {
  66		PyErr_NoMemory();
  67		return -1;
  68	}
  69	self->ob_item = items;
  70	Py_SIZE(self) = newsize;
  71	self->allocated = new_allocated;
  72	return 0;
  73}
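
/*
 * Editorial sketch (not part of the original file): the over-allocation
 * rule above can be exercised in isolation.  The demo below is kept in
 * an "#if 0" block so it is never compiled into the module; the helper
 * name demo_new_allocated() is invented for the illustration.  Run as a
 * standalone program it prints the documented growth pattern
 * 4, 8, 16, 25, 35, 46, 58, 72, 88, ...
 */
#if 0
#include <stdio.h>

static size_t
demo_new_allocated(size_t newsize)
{
	/* Same formula as list_resize() above. */
	return newsize + (newsize >> 3) + (newsize < 9 ? 3 : 6);
}

int
main(void)
{
	size_t size = 0, allocated = 0;
	int i;

	for (i = 0; i < 100; i++) {
		++size;				/* one append */
		if (allocated < size) {		/* list_resize() would grow here */
			allocated = demo_new_allocated(size);
			printf("%lu ", (unsigned long)allocated);
		}
	}
	printf("\n");
	return 0;
}
#endif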
  74
  75/* Debug statistic to compare allocations with reuse through the free list */
  76#undef SHOW_ALLOC_COUNT
  77#ifdef SHOW_ALLOC_COUNT
  78static size_t count_alloc = 0;
  79static size_t count_reuse = 0;
  80
  81static void
  82show_alloc(void)
  83{
  84	fprintf(stderr, "List allocations: %" PY_FORMAT_SIZE_T "d\n",
  85		count_alloc);
  86	fprintf(stderr, "List reuse through freelist: %" PY_FORMAT_SIZE_T
  87		"d\n", count_reuse);
  88	fprintf(stderr, "%.2f%% reuse rate\n\n",
  89		(100.0*count_reuse/(count_alloc+count_reuse)));
  90}
  91#endif
  92
  93/* Empty list reuse scheme to save calls to malloc and free */
  94#ifndef PyList_MAXFREELIST
  95#define PyList_MAXFREELIST 80
  96#endif
  97static PyListObject *free_list[PyList_MAXFREELIST];
  98static int numfree = 0;
  99
 100void
 101PyList_Fini(void)
 102{
 103	PyListObject *op;
 104
 105	while (numfree) {
 106		op = free_list[--numfree];
 107		assert(PyList_CheckExact(op));
 108		PyObject_GC_Del(op);
 109	}
 110}
 111
 112PyObject *
 113PyList_New(Py_ssize_t size)
 114{
 115	PyListObject *op;
 116	size_t nbytes;
 117#ifdef SHOW_ALLOC_COUNT
 118	static int initialized = 0;
 119	if (!initialized) {
 120		Py_AtExit(show_alloc);
 121		initialized = 1;
 122	}
 123#endif
 124
 125	if (size < 0) {
 126		PyErr_BadInternalCall();
 127		return NULL;
 128	}
  129	/* Check for overflow before the multiplication, without relying
  130	 * on an actual overflow that the compiler could optimise away. */
  131	if (size > PY_SIZE_MAX / sizeof(PyObject *))
  132		return PyErr_NoMemory();
  133	nbytes = size * sizeof(PyObject *);
 134	if (numfree) {
 135		numfree--;
 136		op = free_list[numfree];
 137		_Py_NewReference((PyObject *)op);
 138#ifdef SHOW_ALLOC_COUNT
 139		count_reuse++;
 140#endif
 141	} else {
 142		op = PyObject_GC_New(PyListObject, &PyList_Type);
 143		if (op == NULL)
 144			return NULL;
 145#ifdef SHOW_ALLOC_COUNT
 146		count_alloc++;
 147#endif
 148	}
 149	if (size <= 0)
 150		op->ob_item = NULL;
 151	else {
 152		op->ob_item = (PyObject **) PyMem_MALLOC(nbytes);
 153		if (op->ob_item == NULL) {
 154			Py_DECREF(op);
 155			return PyErr_NoMemory();
 156		}
 157		memset(op->ob_item, 0, nbytes);
 158	}
 159	Py_SIZE(op) = size;
 160	op->allocated = size;
 161	_PyObject_GC_TRACK(op);
 162	return (PyObject *) op;
 163}
 164
 165Py_ssize_t
 166PyList_Size(PyObject *op)
 167{
 168	if (!PyList_Check(op)) {
 169		PyErr_BadInternalCall();
 170		return -1;
 171	}
 172	else
 173		return Py_SIZE(op);
 174}
 175
 176static PyObject *indexerr = NULL;
 177
 178PyObject *
 179PyList_GetItem(PyObject *op, Py_ssize_t i)
 180{
 181	if (!PyList_Check(op)) {
 182		PyErr_BadInternalCall();
 183		return NULL;
 184	}
 185	if (i < 0 || i >= Py_SIZE(op)) {
 186		if (indexerr == NULL)
 187			indexerr = PyString_FromString(
 188				"list index out of range");
 189		PyErr_SetObject(PyExc_IndexError, indexerr);
 190		return NULL;
 191	}
 192	return ((PyListObject *)op) -> ob_item[i];
 193}
 194
 195int
 196PyList_SetItem(register PyObject *op, register Py_ssize_t i,
 197               register PyObject *newitem)
 198{
 199	register PyObject *olditem;
 200	register PyObject **p;
 201	if (!PyList_Check(op)) {
 202		Py_XDECREF(newitem);
 203		PyErr_BadInternalCall();
 204		return -1;
 205	}
 206	if (i < 0 || i >= Py_SIZE(op)) {
 207		Py_XDECREF(newitem);
 208		PyErr_SetString(PyExc_IndexError,
 209				"list assignment index out of range");
 210		return -1;
 211	}
 212	p = ((PyListObject *)op) -> ob_item + i;
 213	olditem = *p;
 214	*p = newitem;
 215	Py_XDECREF(olditem);
 216	return 0;
 217}
 218
 219static int
 220ins1(PyListObject *self, Py_ssize_t where, PyObject *v)
 221{
 222	Py_ssize_t i, n = Py_SIZE(self);
 223	PyObject **items;
 224	if (v == NULL) {
 225		PyErr_BadInternalCall();
 226		return -1;
 227	}
 228	if (n == PY_SSIZE_T_MAX) {
 229		PyErr_SetString(PyExc_OverflowError,
 230			"cannot add more objects to list");
 231		return -1;
 232	}
 233
 234	if (list_resize(self, n+1) == -1)
 235		return -1;
 236
 237	if (where < 0) {
 238		where += n;
 239		if (where < 0)
 240			where = 0;
 241	}
 242	if (where > n)
 243		where = n;
 244	items = self->ob_item;
 245	for (i = n; --i >= where; )
 246		items[i+1] = items[i];
 247	Py_INCREF(v);
 248	items[where] = v;
 249	return 0;
 250}
 251
 252int
 253PyList_Insert(PyObject *op, Py_ssize_t where, PyObject *newitem)
 254{
 255	if (!PyList_Check(op)) {
 256		PyErr_BadInternalCall();
 257		return -1;
 258	}
 259	return ins1((PyListObject *)op, where, newitem);
 260}
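
/*
 * Editorial sketch (not part of the original file): ins1() clamps an
 * out-of-range index rather than raising, which is what makes
 * list.insert() at the Python level behave this way too.  A minimal
 * C-API usage example; it assumes an initialized interpreter and omits
 * error checking and reference-count cleanup for brevity.
 */
#if 0
	PyObject *list = PyList_New(0);
	PyObject *a = PyString_FromString("a");
	PyObject *b = PyString_FromString("b");

	PyList_Append(list, a);			/* ["a"]                    */
	PyList_Insert(list, 100, b);		/* clamped to the end:      */
						/*   ["a", "b"]             */
	PyList_Insert(list, -100, b);		/* clamped to the front:    */
						/*   ["b", "a", "b"]        */
#endif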
 261
 262static int
 263app1(PyListObject *self, PyObject *v)
 264{
 265	Py_ssize_t n = PyList_GET_SIZE(self);
 266
 267	assert (v != NULL);
 268	if (n == PY_SSIZE_T_MAX) {
 269		PyErr_SetString(PyExc_OverflowError,
 270			"cannot add more objects to list");
 271		return -1;
 272	}
 273
 274	if (list_resize(self, n+1) == -1)
 275		return -1;
 276
 277	Py_INCREF(v);
 278	PyList_SET_ITEM(self, n, v);
 279	return 0;
 280}
 281
 282int
 283PyList_Append(PyObject *op, PyObject *newitem)
 284{
 285	if (PyList_Check(op) && (newitem != NULL))
 286		return app1((PyListObject *)op, newitem);
 287	PyErr_BadInternalCall();
 288	return -1;
 289}
 290
 291/* Methods */
 292
 293static void
 294list_dealloc(PyListObject *op)
 295{
 296	Py_ssize_t i;
 297	PyObject_GC_UnTrack(op);
 298	Py_TRASHCAN_SAFE_BEGIN(op)
 299	if (op->ob_item != NULL) {
 300		/* Do it backwards, for Christian Tismer.
 301		   There's a simple test case where somehow this reduces
 302		   thrashing when a *very* large list is created and
 303		   immediately deleted. */
 304		i = Py_SIZE(op);
 305		while (--i >= 0) {
 306			Py_XDECREF(op->ob_item[i]);
 307		}
 308		PyMem_FREE(op->ob_item);
 309	}
 310	if (numfree < PyList_MAXFREELIST && PyList_CheckExact(op))
 311		free_list[numfree++] = op;
 312	else
 313		Py_TYPE(op)->tp_free((PyObject *)op);
 314	Py_TRASHCAN_SAFE_END(op)
 315}
 316
 317static int
 318list_print(PyListObject *op, FILE *fp, int flags)
 319{
 320	int rc;
 321	Py_ssize_t i;
 322
 323	rc = Py_ReprEnter((PyObject*)op);
 324	if (rc != 0) {
 325		if (rc < 0)
 326			return rc;
 327		Py_BEGIN_ALLOW_THREADS
 328		fprintf(fp, "[...]");
 329		Py_END_ALLOW_THREADS
 330		return 0;
 331	}
 332	Py_BEGIN_ALLOW_THREADS
 333	fprintf(fp, "[");
 334	Py_END_ALLOW_THREADS
 335	for (i = 0; i < Py_SIZE(op); i++) {
 336		if (i > 0) {
 337			Py_BEGIN_ALLOW_THREADS
 338			fprintf(fp, ", ");
 339			Py_END_ALLOW_THREADS
 340		}
 341		if (PyObject_Print(op->ob_item[i], fp, 0) != 0) {
 342			Py_ReprLeave((PyObject *)op);
 343			return -1;
 344		}
 345	}
 346	Py_BEGIN_ALLOW_THREADS
 347	fprintf(fp, "]");
 348	Py_END_ALLOW_THREADS
 349	Py_ReprLeave((PyObject *)op);
 350	return 0;
 351}
 352
 353static PyObject *
 354list_repr(PyListObject *v)
 355{
 356	Py_ssize_t i;
 357	PyObject *s, *temp;
 358	PyObject *pieces = NULL, *result = NULL;
 359
 360	i = Py_ReprEnter((PyObject*)v);
 361	if (i != 0) {
 362		return i > 0 ? PyString_FromString("[...]") : NULL;
 363	}
 364
 365	if (Py_SIZE(v) == 0) {
 366		result = PyString_FromString("[]");
 367		goto Done;
 368	}
 369
 370	pieces = PyList_New(0);
 371	if (pieces == NULL)
 372		goto Done;
 373
 374	/* Do repr() on each element.  Note that this may mutate the list,
 375	   so must refetch the list size on each iteration. */
 376	for (i = 0; i < Py_SIZE(v); ++i) {
 377		int status;
 378		if (Py_EnterRecursiveCall(" while getting the repr of a list"))
 379			goto Done;
 380		s = PyObject_Repr(v->ob_item[i]);
 381		Py_LeaveRecursiveCall();
 382		if (s == NULL)
 383			goto Done;
 384		status = PyList_Append(pieces, s);
 385		Py_DECREF(s);  /* append created a new ref */
 386		if (status < 0)
 387			goto Done;
 388	}
 389
 390	/* Add "[]" decorations to the first and last items. */
 391	assert(PyList_GET_SIZE(pieces) > 0);
 392	s = PyString_FromString("[");
 393	if (s == NULL)
 394		goto Done;
 395	temp = PyList_GET_ITEM(pieces, 0);
 396	PyString_ConcatAndDel(&s, temp);
 397	PyList_SET_ITEM(pieces, 0, s);
 398	if (s == NULL)
 399		goto Done;
 400
 401	s = PyString_FromString("]");
 402	if (s == NULL)
 403		goto Done;
 404	temp = PyList_GET_ITEM(pieces, PyList_GET_SIZE(pieces) - 1);
 405	PyString_ConcatAndDel(&temp, s);
 406	PyList_SET_ITEM(pieces, PyList_GET_SIZE(pieces) - 1, temp);
 407	if (temp == NULL)
 408		goto Done;
 409
 410	/* Paste them all together with ", " between. */
 411	s = PyString_FromString(", ");
 412	if (s == NULL)
 413		goto Done;
 414	result = _PyString_Join(s, pieces);
 415	Py_DECREF(s);
 416
 417Done:
 418	Py_XDECREF(pieces);
 419	Py_ReprLeave((PyObject *)v);
 420	return result;
 421}
 422
 423static Py_ssize_t
 424list_length(PyListObject *a)
 425{
 426	return Py_SIZE(a);
 427}
 428
 429static int
 430list_contains(PyListObject *a, PyObject *el)
 431{
 432	Py_ssize_t i;
 433	int cmp;
 434
 435	for (i = 0, cmp = 0 ; cmp == 0 && i < Py_SIZE(a); ++i)
 436		cmp = PyObject_RichCompareBool(el, PyList_GET_ITEM(a, i),
 437						   Py_EQ);
 438	return cmp;
 439}
 440
 441static PyObject *
 442list_item(PyListObject *a, Py_ssize_t i)
 443{
 444	/* If you change this, also change llvm_inline_functions.c */
 445	if (i < 0 || i >= Py_SIZE(a)) {
 446		if (indexerr == NULL)
 447			indexerr = PyString_FromString(
 448				"list index out of range");
 449		PyErr_SetObject(PyExc_IndexError, indexerr);
 450		return NULL;
 451	}
 452	Py_INCREF(a->ob_item[i]);
 453	return a->ob_item[i];
 454}
 455
 456static PyObject *
 457list_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
 458{
 459	PyListObject *np;
 460	PyObject **src, **dest;
 461	Py_ssize_t i, len;
 462	if (ilow < 0)
 463		ilow = 0;
 464	else if (ilow > Py_SIZE(a))
 465		ilow = Py_SIZE(a);
 466	if (ihigh < ilow)
 467		ihigh = ilow;
 468	else if (ihigh > Py_SIZE(a))
 469		ihigh = Py_SIZE(a);
 470	len = ihigh - ilow;
 471	np = (PyListObject *) PyList_New(len);
 472	if (np == NULL)
 473		return NULL;
 474
 475	src = a->ob_item + ilow;
 476	dest = np->ob_item;
 477	for (i = 0; i < len; i++) {
 478		PyObject *v = src[i];
 479		Py_INCREF(v);
 480		dest[i] = v;
 481	}
 482	return (PyObject *)np;
 483}
 484
 485PyObject *
 486PyList_GetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
 487{
 488	if (!PyList_Check(a)) {
 489		PyErr_BadInternalCall();
 490		return NULL;
 491	}
 492	return list_slice((PyListObject *)a, ilow, ihigh);
 493}
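
/*
 * Editorial note (not part of the original file): unlike Python-level
 * slicing, list_slice()/PyList_GetSlice() simply clamp out-of-range
 * bounds; a negative ilow is not interpreted as "from the end".  For a
 * three-element list 'lst' (an assumed, existing list object):
 */
#if 0
	/* equivalent to lst[0:3], i.e. a shallow copy of the whole list */
	PyObject *copy = PyList_GetSlice(lst, -5, 100);
#endif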
 494
 495static PyObject *
 496list_concat(PyListObject *a, PyObject *bb)
 497{
 498	Py_ssize_t size;
 499	Py_ssize_t i;
 500	PyObject **src, **dest;
 501	PyListObject *np;
 502	if (!PyList_Check(bb)) {
 503		PyErr_Format(PyExc_TypeError,
 504			  "can only concatenate list (not \"%.200s\") to list",
 505			  bb->ob_type->tp_name);
 506		return NULL;
 507	}
 508#define b ((PyListObject *)bb)
 509	size = Py_SIZE(a) + Py_SIZE(b);
 510	if (size < 0)
 511		return PyErr_NoMemory();
 512	np = (PyListObject *) PyList_New(size);
 513	if (np == NULL) {
 514		return NULL;
 515	}
 516	src = a->ob_item;
 517	dest = np->ob_item;
 518	for (i = 0; i < Py_SIZE(a); i++) {
 519		PyObject *v = src[i];
 520		Py_INCREF(v);
 521		dest[i] = v;
 522	}
 523	src = b->ob_item;
 524	dest = np->ob_item + Py_SIZE(a);
 525	for (i = 0; i < Py_SIZE(b); i++) {
 526		PyObject *v = src[i];
 527		Py_INCREF(v);
 528		dest[i] = v;
 529	}
 530	return (PyObject *)np;
 531#undef b
 532}
 533
 534static PyObject *
 535list_repeat(PyListObject *a, Py_ssize_t n)
 536{
 537	Py_ssize_t i, j;
 538	Py_ssize_t size;
 539	PyListObject *np;
 540	PyObject **p, **items;
 541	PyObject *elem;
 542	if (n < 0)
 543		n = 0;
 544	size = Py_SIZE(a) * n;
 545	if (n && size/n != Py_SIZE(a))
 546		return PyErr_NoMemory();
 547	if (size == 0)
 548		return PyList_New(0);
 549	np = (PyListObject *) PyList_New(size);
 550	if (np == NULL)
 551		return NULL;
 552
 553	items = np->ob_item;
 554	if (Py_SIZE(a) == 1) {
 555		elem = a->ob_item[0];
 556		for (i = 0; i < n; i++) {
 557			items[i] = elem;
 558			Py_INCREF(elem);
 559		}
 560		return (PyObject *) np;
 561	}
 562	p = np->ob_item;
 563	items = a->ob_item;
 564	for (i = 0; i < n; i++) {
 565		for (j = 0; j < Py_SIZE(a); j++) {
 566			*p = items[j];
 567			Py_INCREF(*p);
 568			p++;
 569		}
 570	}
 571	return (PyObject *) np;
 572}
 573
 574static int
 575list_clear(PyListObject *a)
 576{
 577	Py_ssize_t i;
 578	PyObject **item = a->ob_item;
 579	if (item != NULL) {
 580		/* Because XDECREF can recursively invoke operations on
 581		   this list, we make it empty first. */
 582		i = Py_SIZE(a);
 583		Py_SIZE(a) = 0;
 584		a->ob_item = NULL;
 585		a->allocated = 0;
 586		while (--i >= 0) {
 587			Py_XDECREF(item[i]);
 588		}
 589		PyMem_FREE(item);
 590	}
 591	/* Never fails; the return value can be ignored.
 592	   Note that there is no guarantee that the list is actually empty
 593	   at this point, because XDECREF may have populated it again! */
 594	return 0;
 595}
 596
 597/* a[ilow:ihigh] = v if v != NULL.
 598 * del a[ilow:ihigh] if v == NULL.
 599 *
 600 * Special speed gimmick:  when v is NULL and ihigh - ilow <= 8, it's
 601 * guaranteed the call cannot fail.
 602 */
 603static int
 604list_ass_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
 605{
 606	/* Because [X]DECREF can recursively invoke list operations on
 607	   this list, we must postpone all [X]DECREF activity until
 608	   after the list is back in its canonical shape.  Therefore
 609	   we must allocate an additional array, 'recycle', into which
 610	   we temporarily copy the items that are deleted from the
 611	   list. :-( */
 612	PyObject *recycle_on_stack[8];
 613	PyObject **recycle = recycle_on_stack; /* will allocate more if needed */
 614	PyObject **item;
 615	PyObject **vitem = NULL;
 616	PyObject *v_as_SF = NULL; /* PySequence_Fast(v) */
 617	Py_ssize_t n; /* # of elements in replacement list */
 618	Py_ssize_t norig; /* # of elements in list getting replaced */
 619	Py_ssize_t d; /* Change in size */
 620	Py_ssize_t k;
 621	size_t s;
 622	int result = -1;	/* guilty until proved innocent */
 623#define b ((PyListObject *)v)
 624	if (v == NULL)
 625		n = 0;
 626	else {
 627		if (a == b) {
 628			/* Special case "a[i:j] = a" -- copy b first */
 629			v = list_slice(b, 0, Py_SIZE(b));
 630			if (v == NULL)
 631				return result;
 632			result = list_ass_slice(a, ilow, ihigh, v);
 633			Py_DECREF(v);
 634			return result;
 635		}
 636		v_as_SF = PySequence_Fast(v, "can only assign an iterable");
 637		if(v_as_SF == NULL)
 638			goto Error;
 639		n = PySequence_Fast_GET_SIZE(v_as_SF);
 640		vitem = PySequence_Fast_ITEMS(v_as_SF);
 641	}
 642	if (ilow < 0)
 643		ilow = 0;
 644	else if (ilow > Py_SIZE(a))
 645		ilow = Py_SIZE(a);
 646
 647	if (ihigh < ilow)
 648		ihigh = ilow;
 649	else if (ihigh > Py_SIZE(a))
 650		ihigh = Py_SIZE(a);
 651
 652	norig = ihigh - ilow;
 653	assert(norig >= 0);
 654	d = n - norig;
 655	if (Py_SIZE(a) + d == 0) {
 656		Py_XDECREF(v_as_SF);
 657		return list_clear(a);
 658	}
 659	item = a->ob_item;
 660	/* recycle the items that we are about to remove */
 661	s = norig * sizeof(PyObject *);
 662	if (s > sizeof(recycle_on_stack)) {
 663		recycle = (PyObject **)PyMem_MALLOC(s);
 664		if (recycle == NULL) {
 665			PyErr_NoMemory();
 666			goto Error;
 667		}
 668	}
 669	memcpy(recycle, &item[ilow], s);
 670
 671	if (d < 0) { /* Delete -d items */
 672		memmove(&item[ihigh+d], &item[ihigh],
 673			(Py_SIZE(a) - ihigh)*sizeof(PyObject *));
 674		list_resize(a, Py_SIZE(a) + d);
 675		item = a->ob_item;
 676	}
 677	else if (d > 0) { /* Insert d items */
 678		k = Py_SIZE(a);
 679		if (list_resize(a, k+d) < 0)
 680			goto Error;
 681		item = a->ob_item;
 682		memmove(&item[ihigh+d], &item[ihigh],
 683			(k - ihigh)*sizeof(PyObject *));
 684	}
 685	for (k = 0; k < n; k++, ilow++) {
 686		PyObject *w = vitem[k];
 687		Py_XINCREF(w);
 688		item[ilow] = w;
 689	}
 690	for (k = norig - 1; k >= 0; --k)
 691		Py_XDECREF(recycle[k]);
 692	result = 0;
 693 Error:
 694	if (recycle != recycle_on_stack)
 695		PyMem_FREE(recycle);
 696	Py_XDECREF(v_as_SF);
 697	return result;
 698#undef b
 699}
 700
 701int
 702PyList_SetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
 703{
 704	if (!PyList_Check(a)) {
 705		PyErr_BadInternalCall();
 706		return -1;
 707	}
 708	return list_ass_slice((PyListObject *)a, ilow, ihigh, v);
 709}
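
/*
 * Editorial sketch (not part of the original file): list_ass_slice()
 * backs both slice assignment and slice deletion through
 * PyList_SetSlice().  'lst' and 'other' are assumed, existing objects;
 * error checking is omitted.
 */
#if 0
	/* del lst[1:3] -- passing v == NULL deletes the slice */
	PyList_SetSlice(lst, 1, 3, NULL);

	/* lst[0:0] = other -- insert the items of 'other' at the front */
	PyList_SetSlice(lst, 0, 0, other);
#endif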
 710
 711static PyObject *
 712list_inplace_repeat(PyListObject *self, Py_ssize_t n)
 713{
 714	PyObject **items;
 715	Py_ssize_t size, i, j, p;
 716
 717
 718	size = PyList_GET_SIZE(self);
 719	if (size == 0 || n == 1) {
 720		Py_INCREF(self);
 721		return (PyObject *)self;
 722	}
 723
 724	if (n < 1) {
 725		(void)list_clear(self);
 726		Py_INCREF(self);
 727		return (PyObject *)self;
 728	}
 729
 730	if (size > PY_SSIZE_T_MAX / n) {
 731		return PyErr_NoMemory();
 732	}
 733
 734	if (list_resize(self, size*n) == -1)
 735		return NULL;
 736
 737	p = size;
 738	items = self->ob_item;
 739	for (i = 1; i < n; i++) { /* Start counting at 1, not 0 */
 740		for (j = 0; j < size; j++) {
 741			PyObject *o = items[j];
 742			Py_INCREF(o);
 743			items[p++] = o;
 744		}
 745	}
 746	Py_INCREF(self);
 747	return (PyObject *)self;
 748}
 749
 750static int
 751list_ass_item(PyListObject *a, Py_ssize_t i, PyObject *v)
 752{
 753	/* If you change this, also change llvm_inline_functions.c */
 754	PyObject *old_value;
 755	if (i < 0 || i >= Py_SIZE(a)) {
 756		PyErr_SetString(PyExc_IndexError,
 757				"list assignment index out of range");
 758		return -1;
 759	}
 760	if (v == NULL)
 761		return list_ass_slice(a, i, i+1, v);
 762	Py_INCREF(v);
 763	old_value = a->ob_item[i];
 764	a->ob_item[i] = v;
 765	Py_DECREF(old_value);
 766	return 0;
 767}
 768
 769static PyObject *
 770listinsert(PyListObject *self, PyObject *index, PyObject *v)
 771{
 772	Py_ssize_t i = PyInt_AsSsize_t(index);
 773	if (i == -1 && PyErr_Occurred())
 774		return NULL;
 775
 776	if (ins1(self, i, v) == 0)
 777		Py_RETURN_NONE;
 778	return NULL;
 779}
 780
 781static PyObject *
 782listappend(PyListObject *self, PyObject *v)
 783{
 784	if (app1(self, v) == 0)
 785		Py_RETURN_NONE;
 786	return NULL;
 787}
 788
 789static PyObject *
 790listextend(PyListObject *self, PyObject *b)
 791{
 792	PyObject *it;      /* iter(v) */
 793	Py_ssize_t m;		   /* size of self */
 794	Py_ssize_t n;		   /* guess for size of b */
 795	Py_ssize_t mn;		   /* m + n */
 796	Py_ssize_t i;
 797	PyObject *(*iternext)(PyObject *);
 798
 799	/* Special cases:
 800	   1) lists and tuples which can use PySequence_Fast ops
 801	   2) extending self to self requires making a copy first
 802	*/
 803	if (PyList_CheckExact(b) || PyTuple_CheckExact(b) || (PyObject *)self == b) {
 804		PyObject **src, **dest;
 805		b = PySequence_Fast(b, "argument must be iterable");
 806		if (!b)
 807			return NULL;
 808		n = PySequence_Fast_GET_SIZE(b);
 809		if (n == 0) {
 810			/* short circuit when b is empty */
 811			Py_DECREF(b);
 812			Py_RETURN_NONE;
 813		}
 814		m = Py_SIZE(self);
 815		if (list_resize(self, m + n) == -1) {
 816			Py_DECREF(b);
 817			return NULL;
 818		}
 819		/* note that we may still have self == b here for the
 820		 * situation a.extend(a), but the following code works
 821		 * in that case too.  Just make sure to resize self
 822		 * before calling PySequence_Fast_ITEMS.
 823		 */
 824		/* populate the end of self with b's items */
 825		src = PySequence_Fast_ITEMS(b);
 826		dest = self->ob_item + m;
 827		for (i = 0; i < n; i++) {
 828			PyObject *o = src[i];
 829			Py_INCREF(o);
 830			dest[i] = o;
 831		}
 832		Py_DECREF(b);
 833		Py_RETURN_NONE;
 834	}
 835
 836	it = PyObject_GetIter(b);
 837	if (it == NULL)
 838		return NULL;
 839	iternext = *it->ob_type->tp_iternext;
 840
 841	/* Guess a result list size. */
 842	n = _PyObject_LengthHint(b, 8);
 843	if (n == -1) {
 844		Py_DECREF(it);
 845		return NULL;
 846	}
 847	m = Py_SIZE(self);
 848	mn = m + n;
 849	if (mn >= m) {
 850		/* Make room. */
 851		if (list_resize(self, mn) == -1)
 852			goto error;
 853		/* Make the list sane again. */
 854		Py_SIZE(self) = m;
 855	}
 856	/* Else m + n overflowed; on the chance that n lied, and there really
 857	 * is enough room, ignore it.  If n was telling the truth, we'll
 858	 * eventually run out of memory during the loop.
 859	 */
 860
 861	/* Run iterator to exhaustion. */
 862	for (;;) {
 863		PyObject *item = iternext(it);
 864		if (item == NULL) {
 865			if (PyErr_Occurred()) {
 866				if (PyErr_ExceptionMatches(PyExc_StopIteration))
 867					PyErr_Clear();
 868				else
 869					goto error;
 870			}
 871			break;
 872		}
 873		if (Py_SIZE(self) < self->allocated) {
 874			/* steals ref */
 875			PyList_SET_ITEM(self, Py_SIZE(self), item);
 876			++Py_SIZE(self);
 877		}
 878		else {
 879			int status = app1(self, item);
 880			Py_DECREF(item);  /* append creates a new ref */
 881			if (status < 0)
 882				goto error;
 883		}
 884	}
 885
 886	/* Cut back result list if initial guess was too large. */
 887	if (Py_SIZE(self) < self->allocated)
 888		list_resize(self, Py_SIZE(self));  /* shrinking can't fail */
 889
 890	Py_DECREF(it);
 891	Py_RETURN_NONE;
 892
 893  error:
 894	Py_DECREF(it);
 895	return NULL;
 896}
 897
 898PyObject *
 899_PyList_Extend(PyListObject *self, PyObject *b)
 900{
 901	return listextend(self, b);
 902}
 903
 904static PyObject *
 905list_inplace_concat(PyListObject *self, PyObject *other)
 906{
 907	PyObject *result;
 908
 909	result = listextend(self, other);
 910	if (result == NULL)
 911		return result;
 912	Py_DECREF(result);
 913	Py_INCREF(self);
 914	return (PyObject *)self;
 915}
 916
 917static PyObject *
 918listpop(PyListObject *self, PyObject *arg)
 919{
 920	Py_ssize_t i = -1;
 921	PyObject *v;
 922	int status;
 923
 924	if (arg != NULL) {
 925		i = PyInt_AsSsize_t(arg);
 926		if (i == -1 && PyErr_Occurred())
 927			return NULL;
 928	}
 929
 930	if (Py_SIZE(self) == 0) {
 931		/* Special-case most common failure cause */
 932		PyErr_SetString(PyExc_IndexError, "pop from empty list");
 933		return NULL;
 934	}
 935	if (i < 0)
 936		i += Py_SIZE(self);
 937	if (i < 0 || i >= Py_SIZE(self)) {
 938		PyErr_SetString(PyExc_IndexError, "pop index out of range");
 939		return NULL;
 940	}
 941	v = self->ob_item[i];
 942	if (i == Py_SIZE(self) - 1) {
 943		status = list_resize(self, Py_SIZE(self) - 1);
 944		assert(status >= 0);
 945		return v; /* and v now owns the reference the list had */
 946	}
 947	Py_INCREF(v);
 948	status = list_ass_slice(self, i, i+1, (PyObject *)NULL);
 949	assert(status >= 0);
 950	/* Use status, so that in a release build compilers don't
 951	 * complain about the unused name.
 952	 */
 953	(void) status;
 954
 955	return v;
 956}
 957
 958/* Reverse a slice of a list in place, from lo up to (exclusive) hi. */
 959static void
 960reverse_slice(PyObject **lo, PyObject **hi)
 961{
 962	assert(lo && hi);
 963
 964	--hi;
 965	while (lo < hi) {
 966		PyObject *t = *lo;
 967		*lo = *hi;
 968		*hi = t;
 969		++lo;
 970		--hi;
 971	}
 972}
 973
 974/* Lots of code for an adaptive, stable, natural mergesort.  There are many
 975 * pieces to this algorithm; read listsort.txt for overviews and details.
 976 */
 977
 978/* Comparison function.  Takes care of calling a user-supplied
 979 * comparison function (any callable Python object), which must not be
 980 * NULL (use the ISLT macro if you don't know, or call PyObject_RichCompareBool
 981 * with Py_LT if you know it's NULL).
 982 * Returns -1 on error, 1 if x < y, 0 if x >= y.
 983 */
 984static int
 985islt(PyObject *x, PyObject *y, PyObject *compare)
 986{
 987	PyObject *res;
 988	PyObject *args;
 989	Py_ssize_t i;
 990
 991	assert(compare != NULL);
 992	/* Call the user's comparison function and translate the 3-way
 993	 * result into true or false (or error).
 994	 */
 995	args = PyTuple_New(2);
 996	if (args == NULL)
 997		return -1;
 998	Py_INCREF(x);
 999	Py_INCREF(y);
1000	PyTuple_SET_ITEM(args, 0, x);
1001	PyTuple_SET_ITEM(args, 1, y);
1002	res = PyObject_Call(compare, args, NULL);
1003	Py_DECREF(args);
1004	if (res == NULL)
1005		return -1;
1006	if (!PyInt_Check(res)) {
1007		PyErr_Format(PyExc_TypeError,
1008			     "comparison function must return int, not %.200s",
1009			     res->ob_type->tp_name);
1010		Py_DECREF(res);
1011		return -1;
1012	}
1013	i = PyInt_AsLong(res);
1014	Py_DECREF(res);
1015	return i < 0;
1016}
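
/*
 * Editorial note (not part of the original file): islt() folds the
 * user's three-way result into a boolean.  For example, with the
 * reversed comparator "lambda x, y: cmp(y, x)" and x = 1, y = 2, the
 * callable returns a positive number, islt() therefore returns 0
 * ("x is not less than y" under that ordering), and the overall sort
 * comes out in descending order.
 */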
1017
1018/* If COMPARE is NULL, calls PyObject_RichCompareBool with Py_LT, else calls
1019 * islt.  This avoids a layer of function call in the usual case, and
1020 * sorting does many comparisons.
1021 * Returns -1 on error, 1 if x < y, 0 if x >= y.
1022 */
1023#define ISLT(X, Y, COMPARE) ((COMPARE) == NULL ?			\
1024			     PyObject_RichCompareBool(X, Y, Py_LT) :	\
1025			     islt(X, Y, COMPARE))
1026
1027/* Compare X to Y via "<".  Goto "fail" if the comparison raises an
1028   error.  Else "k" is set to true iff X<Y, and an "if (k)" block is
1029   started.  It makes more sense in context <wink>.  X and Y are PyObject*s.
1030*/
1031#define IFLT(X, Y) if ((k = ISLT(X, Y, compare)) < 0) goto fail;  \
1032		   if (k)
1033
1034/* binarysort is the best method for sorting small arrays: it does
1035   few compares, but can do data movement quadratic in the number of
1036   elements.
1037   [lo, hi) is a contiguous slice of a list, and is sorted via
1038   binary insertion.  This sort is stable.
1039   On entry, must have lo <= start <= hi, and that [lo, start) is already
1040   sorted (pass start == lo if you don't know!).
1041   If islt() complains return -1, else 0.
1042   Even in case of error, the output slice will be some permutation of
1043   the input (nothing is lost or duplicated).
1044*/
1045static int
1046binarysort(PyObject **lo, PyObject **hi, PyObject **start, PyObject *compare)
1047     /* compare -- comparison function object, or NULL for default */
1048{
1049	register Py_ssize_t k;
1050	register PyObject **l, **p, **r;
1051	register PyObject *pivot;
1052
1053	assert(lo <= start && start <= hi);
1054	/* assert [lo, start) is sorted */
1055	if (lo == start)
1056		++start;
1057	for (; start < hi; ++start) {
1058		/* set l to where *start belongs */
1059		l = lo;
1060		r = start;
1061		pivot = *r;
1062		/* Invariants:
1063		 * pivot >= all in [lo, l).
1064		 * pivot  < all in [r, start).
1065		 * The second is vacuously true at the start.
1066		 */
1067		assert(l < r);
1068		do {
1069			p = l + ((r - l) >> 1);
1070			IFLT(pivot, *p)
1071				r = p;
1072			else
1073				l = p+1;
1074		} while (l < r);
1075		assert(l == r);
1076		/* The invariants still hold, so pivot >= all in [lo, l) and
1077		   pivot < all in [l, start), so pivot belongs at l.  Note
1078		   that if there are elements equal to pivot, l points to the
1079		   first slot after them -- that's why this sort is stable.
1080		   Slide over to make room.
1081		   Caution: using memmove is much slower under MSVC 5;
1082		   we're not usually moving many slots. */
1083		for (p = start; p > l; --p)
1084			*p = *(p-1);
1085		*l = pivot;
1086	}
1087	return 0;
1088
1089 fail:
1090	return -1;
1091}
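
/*
 * Editorial sketch (not part of the original file): the same binary
 * insertion idea on a plain int array, without the Python object and
 * error handling machinery, to make the invariants easier to follow.
 * Kept in "#if 0" so it is never compiled; the function name is
 * invented for the illustration.
 */
#if 0
static void
demo_binary_insertion_sort(int *a, int n)
{
	int start, l, r, m, p, pivot;

	for (start = 1; start < n; start++) {
		pivot = a[start];
		l = 0;
		r = start;
		/* Invariant: pivot >= everything in a[0:l],
		 *            pivot <  everything in a[r:start]. */
		while (l < r) {
			m = l + ((r - l) >> 1);
			if (pivot < a[m])
				r = m;
			else
				l = m + 1;	/* ties land after equals: stable */
		}
		/* Slide a[l:start] one slot right and drop pivot into a[l]. */
		for (p = start; p > l; --p)
			a[p] = a[p - 1];
		a[l] = pivot;
	}
}
#endif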
1092
1093/*
1094Return the length of the run beginning at lo, in the slice [lo, hi).  lo < hi
1095is required on entry.  "A run" is the longest ascending sequence, with
1096
1097    lo[0] <= lo[1] <= lo[2] <= ...
1098
1099or the longest descending sequence, with
1100
1101    lo[0] > lo[1] > lo[2] > ...
1102
1103Boolean *descending is set to 0 in the former case, or to 1 in the latter.
1104For its intended use in a stable mergesort, the strictness of the defn of
1105"descending" is needed so that the caller can safely reverse a descending
1106sequence without violating stability (strict > ensures there are no equal
1107elements to get out of order).
1108
1109Returns -1 in case of error.
1110*/
1111static Py_ssize_t
1112count_run(PyObject **lo, PyObject **hi, PyObject *compare, int *descending)
1113{
1114	Py_ssize_t k;
1115	Py_ssize_t n;
1116
1117	assert(lo < hi);
1118	*descending = 0;
1119	++lo;
1120	if (lo == hi)
1121		return 1;
1122
1123	n = 2;
1124	IFLT(*lo, *(lo-1)) {
1125		*descending = 1;
1126		for (lo = lo+1; lo < hi; ++lo, ++n) {
1127			IFLT(*lo, *(lo-1))
1128				;
1129			else
1130				break;
1131		}
1132	}
1133	else {
1134		for (lo = lo+1; lo < hi; ++lo, ++n) {
1135			IFLT(*lo, *(lo-1))
1136				break;
1137		}
1138	}
1139
1140	return n;
1141fail:
1142	return -1;
1143}
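
/*
 * Editorial note (not part of the original file): for the array
 * 3 4 4 2 1 5, count_run() starting at the first element returns 3
 * (the ascending run "3 4 4"; equal neighbours are allowed) with
 * *descending == 0.  Starting at the "2" it returns 2 (the strictly
 * descending run "2 1") with *descending == 1, which the caller can
 * then reverse in place without breaking stability.
 */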
1144
1145/*
1146Locate the proper position of key in a sorted vector; if the vector contains
1147an element equal to key, return the position immediately to the left of
1148the leftmost equal element.  [gallop_right() does the same except returns
1149the position to the right of the rightmost equal element (if any).]
1150
1151"a" is a sorted vector with n elements, starting at a[0].  n must be > 0.
1152
1153"hint" is an index at which to begin the search, 0 <= hint < n.  The closer
1154hint is to the final result, the faster this runs.
1155
1156The return value is the int k in 0..n such that
1157
1158    a[k-1] < key <= a[k]
1159
1160pretending that *(a-1) is minus infinity and a[n] is plus infinity.  IOW,
1161key belongs at index k; or, IOW, the first k elements of a should precede
1162key, and the last n-k should follow key.
1163
1164Returns -1 on error.  See listsort.txt for info on the method.
1165*/
1166static Py_ssize_t
1167gallop_left(PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint, PyObject *compare)
1168{
1169	Py_ssize_t ofs;
1170	Py_ssize_t lastofs;
1171	Py_ssize_t k;
1172
1173	assert(key && a && n > 0 && hint >= 0 && hint < n);
1174
1175	a += hint;
1176	lastofs = 0;
1177	ofs = 1;
1178	IFLT(*a, key) {
1179		/* a[hint] < key -- gallop right, until
1180		 * a[hint + lastofs] < key <= a[hint + ofs]
1181		 */
1182		const Py_ssize_t maxofs = n - hint;	/* &a[n-1] is highest */
1183		while (ofs < maxofs) {
1184			IFLT(a[ofs], key) {
1185				lastofs = ofs;
1186				ofs = (ofs << 1) + 1;
1187				if (ofs <= 0)	/* int overflow */
1188					ofs = maxofs;
1189			}
1190 			else	/* key <= a[hint + ofs] */
1191				break;
1192		}
1193		if (ofs > maxofs)
1194			ofs = maxofs;
1195		/* Translate back to offsets relative to &a[0]. */
1196		lastofs += hint;
1197		ofs += hint;
1198	}
1199	else {
1200		/* key <= a[hint] -- gallop left, until
1201		 * a[hint - ofs] < key <= a[hint - lastofs]
1202		 */
1203		const Py_ssize_t maxofs = hint + 1;	/* &a[0] is lowest */
1204		while (ofs < maxofs) {
1205			IFLT(*(a-ofs), key)
1206				break;
1207			/* key <= a[hint - ofs] */
1208			lastofs = ofs;
1209			ofs = (ofs << 1) + 1;
1210			if (ofs <= 0)	/* int overflow */
1211				ofs = maxofs;
1212		}
1213		if (ofs > maxofs)
1214			ofs = maxofs;
1215		/* Translate back to positive offsets relative to &a[0]. */
1216		k = lastofs;
1217		lastofs = hint - ofs;
1218		ofs = hint - k;
1219	}
1220	a -= hint;
1221
1222	assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
1223	/* Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
1224	 * right of lastofs but no farther right than ofs.  Do a binary
1225	 * search, with invariant a[lastofs-1] < key <= a[ofs].
1226	 */
1227	++lastofs;
1228	while (lastofs < ofs) {
1229		Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
1230
1231		IFLT(a[m], key)
1232			lastofs = m+1;	/* a[m] < key */
1233		else
1234			ofs = m;	/* key <= a[m] */
1235	}
1236	assert(lastofs == ofs);		/* so a[ofs-1] < key <= a[ofs] */
1237	return ofs;
1238
1239fail:
1240	return -1;
1241}
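
/*
 * Editorial note (not part of the original file): with
 * a = {10, 20, 30, 40, 50, 60, 70}, key = 35 and hint = 0,
 * gallop_left() probes a[1] and then a[3] (offsets 1, 3, 7, ...),
 * stopping with a[1] < key <= a[3]; the final binary search over that
 * gap returns 3, the index at which 35 would be inserted (and, for
 * equal keys, the slot just left of the leftmost equal element).
 */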
1242
1243/*
1244Exactly like gallop_left(), except that if key already exists in a[0:n],
1245finds the position immediately to the right of the rightmost equal value.
1246
1247The return value is the int k in 0..n such that
1248
1249    a[k-1] <= key < a[k]
1250
1251or -1 if error.
1252
 1253The code duplication is massive, but because we stick to "<" comparisons
 1254the two routines differ enough that a single routine with yet another
 1255"left or right?" flag would be much harder to follow.
1256*/
1257static Py_ssize_t
1258gallop_right(PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint, PyObject *compare)
1259{
1260	Py_ssize_t ofs;
1261	Py_ssize_t lastofs;
1262	Py_ssize_t k;
1263
1264	assert(key && a && n > 0 && hint >= 0 && hint < n);
1265
1266	a += hint;
1267	lastofs = 0;
1268	ofs = 1;
1269	IFLT(key, *a) {
1270		/* key < a[hint] -- gallop left, until
1271		 * a[hint - ofs] <= key < a[hint - lastofs]
1272		 */
1273		const Py_ssize_t maxofs = hint + 1;	/* &a[0] is lowest */
1274		while (ofs < maxofs) {
1275			IFLT(key, *(a-ofs)) {
1276				lastofs = ofs;
1277				ofs = (ofs << 1) + 1;
1278				if (ofs <= 0)	/* int overflow */
1279					ofs = maxofs;
1280			}
1281			else	/* a[hint - ofs] <= key */
1282				break;
1283		}
1284		if (ofs > maxofs)
1285			ofs = maxofs;
1286		/* Translate back to positive offsets relative to &a[0]. */
1287		k = lastofs;
1288		lastofs = hint - ofs;
1289		ofs = hint - k;
1290	}
1291	else {
1292		/* a[hint] <= key -- gallop right, until
1293		 * a[hint + lastofs] <= key < a[hint + ofs]
1294		*/
1295		const Py_ssize_t maxofs = n - hint;	/* &a[n-1] is highest */
1296		while (ofs < maxofs) {
1297			IFLT(key, a[ofs])
1298				break;
1299			/* a[hint + ofs] <= key */
1300			lastofs = ofs;
1301			ofs = (ofs << 1) + 1;
1302			if (ofs <= 0)	/* int overflow */
1303				ofs = maxofs;
1304		}
1305		if (ofs > maxofs)
1306			ofs = maxofs;
1307		/* Translate back to offsets relative to &a[0]. */
1308		lastofs += hint;
1309		ofs += hint;
1310	}
1311	a -= hint;
1312
1313	assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
1314	/* Now a[lastofs] <= key < a[ofs], so key belongs somewhere to the
1315	 * right of lastofs but no farther right than ofs.  Do a binary
1316	 * search, with invariant a[lastofs-1] <= key < a[ofs].
1317	 */
1318	++lastofs;
1319	while (lastofs < ofs) {
1320		Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
1321
1322		IFLT(key, a[m])
1323			ofs = m;	/* key < a[m] */
1324		else
1325			lastofs = m+1;	/* a[m] <= key */
1326	}
1327	assert(lastofs == ofs);		/* so a[ofs-1] <= key < a[ofs] */
1328	return ofs;
1329
1330fail:
1331	return -1;
1332}
1333
1334/* The maximum number of entries in a MergeState's pending-runs stack.
1335 * This is enough to sort arrays of size up to about
1336 *     32 * phi ** MAX_MERGE_PENDING
 1337 * where phi ~= 1.618.  85 is ridiculously large enough, good for an array
1338 * with 2**64 elements.
1339 */
1340#define MAX_MERGE_PENDING 85
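
/*
 * Editorial note (not part of the original file): a rough check of the
 * bound above: with phi ~= 1.618, 32 * phi**85 ~= 32 * 5.8e17 ~= 1.9e19,
 * slightly above 2**64 ~= 1.8e19, so 85 pending slots are indeed enough
 * for any array whose size fits in 64 bits.
 */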
1341
1342/* When we get into galloping mode, we stay there until both runs win less
1343 * often than MIN_GALLOP consecutive times.  See listsort.txt for more info.
1344 */
1345#define MIN_GALLOP 7
1346
1347/* Avoid malloc for small temp arrays. */
1348#define MERGESTATE_TEMP_SIZE 256
1349
1350/* One MergeState exists on the stack per invocation of mergesort.  It's just
1351 * a convenient way to pass state around among the helper functions.
1352 */
1353struct s_slice {
1354	PyObject **base;
1355	Py_ssize_t len;
1356};
1357
1358typedef struct s_MergeState {
 1359	/* The user-supplied comparison function, or NULL if none given. */
1360	PyObject *compare;
1361
1362	/* This controls when we get *into* galloping mode.  It's initialized
1363	 * to MIN_GALLOP.  merge_lo and merge_hi tend to nudge it higher for
1364	 * random data, and lower for highly structured data.
1365	 */
1366	Py_ssize_t min_gallop;
1367
1368	/* 'a' is temp storage to help with merges.  It contains room for
1369	 * alloced entries.
1370	 */
1371	PyObject **a;	/* may point to temparray below */
1372	Py_ssize_t alloced;
1373
1374	/* A stack of n pending runs yet to be merged.  Run #i starts at
1375	 * address base[i] and extends for len[i] elements.  It's always
1376	 * true (so long as the indices are in bounds) that
1377	 *
1378	 *     pending[i].base + pending[i].len == pending[i+1].base
1379	 *
1380	 * so we could cut the storage for this, but it's a minor amount,
1381	 * and keeping all the info explicit simplifies the code.
1382	 */
1383	int n;
1384	struct s_slice pending[MAX_MERGE_PENDING];
1385
1386	/* 'a' points to this when possible, rather than muck with malloc. */
1387	PyObject *temparray[MERGESTATE_TEMP_SIZE];
1388} MergeState;
1389
1390/* Conceptually a MergeState's constructor. */
1391static void
1392merge_init(MergeState *ms, PyObject *compare)
1393{
1394	assert(ms != NULL);
1395	ms->compare = compare;
1396	ms->a = ms->temparray;
1397	ms->alloced = MERGESTATE_TEMP_SIZE;
1398	ms->n = 0;
1399	ms->min_gallop = MIN_GALLOP;
1400}
1401
1402/* Free all the temp memory owned by the MergeState.  This must be called
1403 * when you're done with a MergeState, and may be called before then if
1404 * you want to free the temp memory early.
1405 */
1406static void
1407merge_freemem(MergeState *ms)
1408{
1409	assert(ms != NULL);
1410	if (ms->a != ms->temparray)
1411		PyMem_Free(ms->a);
1412	ms->a = ms->temparray;
1413	ms->alloced = MERGESTATE_TEMP_SIZE;
1414}
1415
1416/* Ensure enough temp memory for 'need' array slots is available.
1417 * Returns 0 on success and -1 if the memory can't be gotten.
1418 */
1419static int
1420merge_getmem(MergeState *ms, Py_ssize_t need)
1421{
1422	assert(ms != NULL);
1423	if (need <= ms->alloced)
1424		return 0;
1425	/* Don't realloc!  That can cost cycles to copy the old data, but
1426	 * we don't care what's in the block.
1427	 */
1428	merge_freemem(ms);
1429	if (need > PY_SSIZE_T_MAX / sizeof(PyObject*)) {
1430		PyErr_NoMemory();
1431		return -1;
1432	}
1433	ms->a = (PyObject **)PyMem_Malloc(need * sizeof(PyObject*));
1434	if (ms->a) {
1435		ms->alloced = need;
1436		return 0;
1437	}
1438	PyErr_NoMemory();
1439	merge_freemem(ms);	/* reset to sane state */
1440	return -1;
1441}
1442#define MERGE_GETMEM(MS, NEED) ((NEED) <= (MS)->alloced ? 0 :	\
1443				merge_getmem(MS, NEED))
1444
1445/* Merge the na elements starting at pa with the nb elements starting at pb
1446 * in a stable way, in-place.  na and nb must be > 0, and pa + na == pb.
1447 * Must also have that *pb < *pa, that pa[na-1] belongs at the end of the
1448 * merge, and should have na <= nb.  See listsort.txt for more info.
1449 * Return 0 if successful, -1 if error.
1450 */
1451static Py_ssize_t
1452merge_lo(MergeState *ms, PyObject **pa, Py_ssize_t na,
1453                         PyObject **pb, Py_ssize_t nb)
1454{
1455	Py_ssize_t k;
1456	PyObject *compare;
1457	PyObject **dest;
1458	int result = -1;	/* guilty until proved innocent */
1459	Py_ssize_t min_gallop;
1460
1461	assert(ms && pa && pb && na > 0 && nb > 0 && pa + na == pb);
1462	if (MERGE_GETMEM(ms, na) < 0)
1463		return -1;
1464	memcpy(ms->a, pa, na * sizeof(PyObject*));
1465	dest = pa;
1466	pa = ms->a;
1467
1468	*dest++ = *pb++;
1469	--nb;
1470	if (nb == 0)
1471		goto Succeed;
1472	if (na == 1)
1473		goto CopyB;
1474
1475	min_gallop = ms->min_gallop;
1476	compare = ms->compare;
1477	for (;;) {
1478		Py_ssize_t acount = 0;	/* # of times A won in a row */
1479		Py_ssize_t bcount = 0;	/* # of times B won in a row */
1480
1481		/* Do the straightforward thing until (if ever) one run
1482		 * appears to win consistently.
1483		 */
1484 		for (;;) {
1485 			assert(na > 1 && nb > 0);
1486	 		k = ISLT(*pb, *pa, compare);
1487			if (k) {
1488				if (k < 0)
1489					goto Fail;
1490				*dest++ = *pb++;
1491				++bcount;
1492				acount = 0;
1493				--nb;
1494				if (nb == 0)
1495					goto Succeed;
1496				if (bcount >= min_gallop)
1497					break;
1498			}
1499			else {
1500				*dest++ = *pa++;
1501				++acount;
1502				bcount = 0;
1503				--na;
1504				if (na == 1)
1505					goto CopyB;
1506				if (acount >= min_gallop)
1507					break;
1508			}
1509 		}
1510
1511		/* One run is winning so consistently that galloping may
1512		 * be a huge win.  So try that, and continue galloping until
1513		 * (if ever) neither run appears to be winning consistently
1514		 * anymore.
1515		 */
1516		++min_gallop;
1517		do {
1518 			assert(na > 1 && nb > 0);
1519			min_gallop -= min_gallop > 1;
1520	 		ms->min_gallop = min_gallop;
1521			k = gallop_right(*pb, pa, na, 0, compare);
1522			acount = k;
1523			if (k) {
1524				if (k < 0)
1525					goto Fail;
1526				memcpy(dest, pa, k * sizeof(PyObject *));
1527				dest += k;
1528				pa += k;
1529				na -= k;
1530				if (na == 1)
1531					goto CopyB;
1532				/* na==0 is impossible now if the comparison
1533				 * function is consistent, but we can't assume
1534				 * that it is.
1535				 */
1536				if (na == 0)
1537					goto Succeed;
1538			}
1539			*dest++ = *pb++;
1540			--nb;
1541			if (nb == 0)
1542				goto Succeed;
1543
1544 			k = gallop_left(*pa, pb, nb, 0, compare);
1545 			bcount = k;
1546			if (k) {
1547				if (k < 0)
1548					goto Fail;
1549				memmove(dest, pb, k * sizeof(PyObject *));
1550				dest += k;
1551				pb += k;
1552				nb -= k;
1553				if (nb == 0)
1554					goto Succeed;
1555			}
1556			*dest++ = *pa++;
1557			--na;
1558			if (na == 1)
1559				goto CopyB;
1560 		} while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
1561 		++min_gallop;	/* penalize it for leaving galloping mode */
1562 		ms->min_gallop = min_gallop;
1563 	}
1564Succeed:
1565	result = 0;
1566Fail:
1567	if (na)
1568		memcpy(dest, pa, na * sizeof(PyObject*));
1569	return result;
1570CopyB:
1571	assert(na == 1 && nb > 0);
1572	/* The last element of pa belongs at the end of the merge. */
1573	memmove(dest, pb, nb * sizeof(PyObject *));
1574	dest[nb] = *pa;
1575	return 0;
1576}
1577
1578/* Merge the na elements starting at pa with the nb elements starting at pb
1579 * in a stable way, in-place.  na and nb must be > 0, and pa + na == pb.
1580 * Must also have that *pb < *pa, that pa[na-1] belongs at the end of the
1581 * merge, and should have na >= nb.  See listsort.txt for more info.
1582 * Return 0 if successful, -1 if error.
1583 */
1584static Py_ssize_t
1585merge_hi(MergeState *ms, PyObject **pa, Py_ssize_t na, PyObject **pb, Py_ssize_t nb)
1586{
1587	Py_ssize_t k;
1588	PyObject *compare;
1589	PyObject **dest;
1590	int result = -1;	/* guilty until proved innocent */
1591	PyObject **basea;
1592	PyObject **baseb;
1593	Py_ssize_t min_gallop;
1594
1595	assert(ms && pa && pb && na > 0 && nb > 0 && pa + na == pb);
1596	if (MERGE_GETMEM(ms, nb) < 0)
1597		return -1;
1598	dest = pb + nb - 1;
1599	memcpy(ms->a, pb, nb * sizeof(PyObject*));
1600	basea = pa;
1601	baseb = ms->a;
1602	pb = ms->a + nb - 1;
1603	pa += na - 1;
1604
1605	*dest-- = *pa--;
1606	--na;
1607	if (na == 0)
1608		goto Succeed;
1609	if (nb == 1)
1610		goto CopyA;
1611
1612	min_gallop = ms->min_gallop;
1613	compare = ms->compare;
1614	for (;;) {
1615		Py_ssize_t acount = 0;	/* # of times A won in a row */
1616		Py_ssize_t bcount = 0;	/* # of times B won in a row */
1617
1618		/* Do the straightforward thing until (if ever) one run
1619		 * appears to win consistently.
1620		 */
1621 		for (;;) {
1622 			assert(na > 0 && nb > 1);
1623	 		k = ISLT(*pb, *pa, compare);
1624			if (k) {
1625				if (k < 0)
1626					goto Fail;
1627				*dest-- = *pa--;
1628				++acount;
1629				bcount = 0;
1630				--na;
1631				if (na == 0)
1632					goto Succeed;
1633				if (acount >= min_gallop)
1634					break;
1635			}
1636			else {
1637				*dest-- = *pb--;
1638				++bcount;
1639				acount = 0;
1640				--nb;
1641				if (nb == 1)
1642					goto CopyA;
1643				if (bcount >= min_gallop)
1644					break;
1645			}
1646 		}
1647
1648		/* One run is winning so consistently that galloping may
1649		 * be a huge win.  So try that, and continue galloping until
1650		 * (if ever) neither run appears to be winning consistently
1651		 * anymore.
1652		 */
1653		++min_gallop;
1654		do {
1655 			assert(na > 0 && nb > 1);
1656			min_gallop -= min_gallop > 1;
1657	 		ms->min_gallop = min_gallop;
1658			k = gallop_right(*pb, basea, na, na-1, compare);
1659			if (k < 0)
1660				goto Fail;
1661			k = na - k;
1662			acount = k;
1663			if (k) {
1664				dest -= k;
1665				pa -= k;
1666				memmove(dest+1, pa+1, k * sizeof(PyObject *));
1667				na -= k;
1668				if (na == 0)
1669					goto Succeed;
1670			}
1671			*dest-- = *pb--;
1672			--nb;
1673			if (nb == 1)
1674				goto CopyA;
1675
1676 			k = gallop_left(*pa, baseb, nb, nb-1, compare);
1677			if (k < 0)
1678				goto Fail;
1679			k = nb - k;
1680			bcount = k;
1681			if (k) {
1682				dest -= k;
1683				pb -= k;
1684				memcpy(dest+1, pb+1, k * sizeof(PyObject *));
1685				nb -= k;
1686				if (nb == 1)
1687					goto CopyA;
1688				/* nb==0 is impossible now if the comparison
1689				 * function is consistent, but we can't assume
1690				 * that it is.
1691				 */
1692				if (nb == 0)
1693					goto Succeed;
1694			}
1695			*dest-- = *pa--;
1696			--na;
1697			if (na == 0)
1698				goto Succeed;
1699 		} while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
1700 		++min_gallop;	/* penalize it for leaving galloping mode */
1701 		ms->min_gallop = min_gallop;
1702 	}
1703Succeed:
1704	result = 0;
1705Fail:
1706	if (nb)
1707		memcpy(dest-(nb-1), baseb, nb * sizeof(PyObject*));
1708	return result;
1709CopyA:
1710	assert(nb == 1 && na > 0);
1711	/* The first element of pb belongs at the front of the merge. */
1712	dest -= na;
1713	pa -= na;
1714	memmove(dest+1, pa+1, na * sizeof(PyObject *));
1715	*dest = *pb;
1716	return 0;
1717}
1718
1719/* Merge the two runs at stack indices i and i+1.
1720 * Returns 0 on success, -1 on error.
1721 */
1722static Py_ssize_t
1723merge_at(MergeState *ms, Py_ssize_t i)
1724{
1725	PyObject **pa, **pb;
1726	Py_ssize_t na, nb;
1727	Py_ssize_t k;
1728	PyObject *compare;
1729
1730	assert(ms != NULL);
1731	assert(ms->n >= 2);
1732	assert(i >= 0);
1733	assert(i == ms->n - 2 || i == ms->n - 3);
1734
1735	pa = ms->pending[i].base;
1736	na = ms->pending[i].len;
1737	pb = ms->pending[i+1].base;
1738	nb = ms->pending[i+1].len;
1739	assert(na > 0 && nb > 0);
1740	assert(pa + na == pb);
1741
1742	/* Record the length of the combined runs; if i is the 3rd-last
1743	 * run now, also slide over the last run (which isn't involved
1744	 * in this merge).  The current run i+1 goes away in any case.
1745	 */
1746	ms->pending[i].len = na + nb;
1747	if (i == ms->n - 3)
1748		ms->pending[i+1] = ms->pending[i+2];
1749	--ms->n;
1750
1751	/* Where does b start in a?  Elements in a before that can be
1752	 * ignored (already in place).
1753	 */
1754	compare = ms->compare;
1755	k = gallop_right(*pb, pa, na, 0, compare);
1756	if (k < 0)
1757		return -1;
1758	pa += k;
1759	na -= k;
1760	if (na == 0)
1761		return 0;
1762
1763	/* Where does a end in b?  Elements in b after that can be
1764	 * ignored (already in place).
1765	 */
1766	nb = gallop_left(pa[na-1], pb, nb, nb-1, compare);
1767	if (nb <= 0)
1768		return nb;
1769
1770	/* Merge what remains of the runs, using a temp array with
1771	 * min(na, nb) elements.
1772	 */
1773	if (na <= nb)
1774		return merge_lo(ms, pa, na, pb, nb);
1775	else
1776		return merge_hi(ms, pa, na, pb, nb);
1777}
1778
1779/* Examine the stack of runs waiting to be merged, merging adjacent runs
1780 * until the stack invariants are re-established:
1781 *
1782 * 1. len[-3] > len[-2] + len[-1]
1783 * 2. len[-2] > len[-1]
1784 *
1785 * See listsort.txt for more info.
1786 *
1787 * Returns 0 on success, -1 on error.
1788 */
1789static int
1790merge_collapse(MergeState *ms)
1791{
1792	struct s_slice *p = ms->pending;
1793
1794	assert(ms);
1795	while (ms->n > 1) {
1796		Py_ssize_t n = ms->n - 2;
1797		if (n > 0 && p[n-1].len <= p[n].len + p[n+1].len) {
1798		    	if (p[n-1].len < p[n+1].len)
1799		    		--n;
1800			if (merge_at(ms, n) < 0)
1801				return -1;
1802		}
1803		else if (p[n].len <= p[n+1].len) {
1804			 if (merge_at(ms, n) < 0)
1805			 	return -1;
1806		}
1807		else
1808			break;
1809	}
1810	return 0;
1811}
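
/*
 * Editorial note (not part of the original file): a small trace.  With
 * pending run lengths ... 30, 20, 25 (top of stack last), invariant 1
 * fails (30 <= 20 + 25); since 30 is not smaller than 25, the two
 * topmost runs are merged, leaving ... 30, 45.  Now invariant 2 fails
 * (30 <= 45), so those two are merged as well, leaving ... 75, and the
 * loop stops once both invariants hold again.
 */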
1812
1813/* Regardless of invariants, merge all runs on the stack until only one
1814 * remains.  This is used at the end of the mergesort.
1815 *
1816 * Returns 0 on success, -1 on error.
1817 */
1818static int
1819merge_force_collapse(MergeState *ms)
1820{
1821	struct s_slice *p = ms->pending;
1822
1823	assert(ms);
1824	while (ms->n > 1) {
1825		Py_ssize_t n = ms->n - 2;
1826		if (n > 0 && p[n-1].len < p[n+1].len)
1827			--n;
1828		if (merge_at(ms, n) < 0)
1829			return -1;
1830	}
1831	return 0;
1832}
1833
1834/* Compute a good value for the minimum run length; natural runs shorter
1835 * than this are boosted artificially via binary insertion.
1836 *
1837 * If n < 64, return n (it's too small to bother with fancy stuff).
1838 * Else if n is an exact power of 2, return 32.
1839 * Else return an int k, 32 <= k <= 64, such that n/k is close to, but
1840 * strictly less than, an exact power of 2.
1841 *
1842 * See listsort.txt for more info.
1843 */
1844static Py_ssize_t
1845merge_compute_minrun(Py_ssize_t n)
1846{
1847	Py_ssize_t r = 0;	/* becomes 1 if any 1 bits are shifted off */
1848
1849	assert(n >= 0);
1850	while (n >= 64) {
1851		r |= n & 1;
1852		n >>= 1;
1853	}
1854	return n + r;
1855}
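
/*
 * Editorial note (not part of the original file): a few values of the
 * function above.  merge_compute_minrun(63) == 63 (small arrays get one
 * binary-insertion pass), merge_compute_minrun(64) == 32 and
 * merge_compute_minrun(2048) == 32 (exact powers of two), and
 * merge_compute_minrun(2112) == 33: that yields exactly 64 runs of
 * length 33 (a power of two, so the merges stay balanced), where a
 * fixed minrun of 32 would have produced 66 runs.
 */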
1856
1857/* Special wrapper to support stable sorting using the decorate-sort-undecorate
1858   pattern.  Holds a key which is used for comparisons and the original record
1859   which is returned during the undecorate phase.  By exposing only the key
1860   during comparisons, the underlying sort stability characteristics are left
1861   unchanged.  Also, if a custom comparison function is used, it will only see
1862   the key instead of a full record. */
1863
1864typedef struct {
1865	PyObject_HEAD
1866	PyObject *key;
1867	PyObject *value;
1868} sortwrapperobject;
1869
1870PyDoc_STRVAR(sortwrapper_doc, "Object wrapper with a custom sort key.");
1871static PyObject *
1872sortwrapper_richcompare(sortwrapperobject *, sortwrapperobject *, int);
1873static void
1874sortwrapper_dealloc(sortwrapperobject *);
1875
1876static PyTypeObject sortwrapper_type = {
1877	PyVarObject_HEAD_INIT(&PyType_Type, 0)
1878	"sortwrapper",				/* tp_name */
1879	sizeof(sortwrapperobject),		/* tp_basicsize */
1880	0,					/* tp_itemsize */
1881	/* methods */
1882	(destructor)sortwrapper_dealloc,	/* tp_dealloc */
1883	0,					/* tp_print */
1884	0,					/* tp_getattr */
1885	0,					/* tp_setattr */
1886	0,					/* tp_compare */
1887	0,					/* tp_repr */
1888	0,					/* tp_as_number */
1889	0,					/* tp_as_sequence */
1890	0,					/* tp_as_mapping */
1891	0,					/* tp_hash */
1892	0,					/* tp_call */
1893	0,					/* tp_str */
1894	PyObject_GenericGetAttr,		/* tp_getattro */
1895	0,					/* tp_setattro */
1896	0,					/* tp_as_buffer */
1897	Py_TPFLAGS_DEFAULT |
1898	Py_TPFLAGS_HAVE_RICHCOMPARE, 		/* tp_flags */
1899	sortwrapper_doc,			/* tp_doc */
1900	0,					/* tp_traverse */
1901	0,					/* tp_clear */
1902	(richcmpfunc)sortwrapper_richcompare,	/* tp_richcompare */
1903};
1904
1905
1906static PyObject *
1907sortwrapper_richcompare(sortwrapperobject *a, sortwrapperobject *b, int op)
1908{
1909	if (!PyObject_TypeCheck(b, &sortwrapper_type)) {
1910		PyErr_SetString(PyExc_TypeError,
1911			"expected a sortwrapperobject");
1912		return NULL;
1913	}
1914	return PyObject_RichCompare(a->key, b->key, op);
1915}
1916
1917static void
1918sortwrapper_dealloc(sortwrapperobject *so)
1919{
1920	Py_XDECREF(so->key);
1921	Py_XDECREF(so->value);
1922	PyObject_Del(so);
1923}
1924
1925/* Returns a new reference to a sortwrapper.
1926   Consumes the references to the two underlying objects. */
1927
1928static PyObject *
1929build_sortwrapper(PyObject *key, PyObject *value)
1930{
1931	sortwrapperobject *so;
1932
1933	so = PyObject_New(sortwrapperobject, &sortwrapper_type);
1934	if (so == NULL)
1935		return NULL;
1936	so->key = key;
1937	so->value = value;
1938	return (PyObject *)so;
1939}
1940
1941/* Returns a new reference to the value underlying the wrapper. */
1942static PyObject *
1943sortwrapper_getvalue(PyObject *so)
1944{
1945	PyObject *value;
1946
1947	if (!PyObject_TypeCheck(so, &sortwrapper_type)) {
1948		PyErr_SetString(PyExc_TypeError,
1949			"expected a sortwrapperobject");
1950		return NULL;
1951	}
1952	value = ((sortwrapperobject *)so)->value;
1953	Py_INCREF(value);
1954	return value;
1955}
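
/*
 * Editorial sketch (not part of the original file): a schematic outline
 * of how a key function is combined with these helpers in the
 * decorate-sort-undecorate pattern (the real code lives in the
 * truncated part of listsort() below).  Error handling is omitted;
 * 'keyfunc', 'value' and 'kvpair' are the names declared in listsort().
 */
#if 0
	/* decorate: item -> wrapper(key(item), item); build_sortwrapper()
	 * consumes both references */
	key = PyObject_CallFunctionObjArgs(keyfunc, value, NULL);
	kvpair = build_sortwrapper(key, value);

	/* ... sort; comparisons go through sortwrapper_richcompare(),
	 * so only the keys are ever compared ... */

	/* undecorate: recover the original item (new reference) */
	value = sortwrapper_getvalue(kvpair);
#endif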
1956
 1957/* Wrapper for user-specified cmp functions in combination with a
 1958   specified key function.  Makes sure the cmp function is presented
 1959   with the actual key instead of the sortwrapper. */
1960
1961typedef struct {
1962	PyObject_HEAD
1963	PyObject *func;
1964} cmpwrapperobject;
1965
1966static void
1967cmpwrapper_dealloc(cmpwrapperobject *co)
1968{
1969	Py_XDECREF(co->func);
1970	PyObject_Del(co);
1971}
1972
1973static PyObject *
1974cmpwrapper_call(cmpwrapperobject *co, PyObject *args, PyObject *kwds)
1975{
1976	PyObject *x, *y, *xx, *yy;
1977
1978	if (!PyArg_UnpackTuple(args, "", 2, 2, &x, &y))
1979		return NULL;
1980	if (!PyObject_TypeCheck(x, &sortwrapper_type) ||
1981	    !PyObject_TypeCheck(y, &sortwrapper_type)) {
1982		PyErr_SetString(PyExc_TypeError,
1983			"expected a sortwrapperobject");
1984		return NULL;
1985	}
1986	xx = ((sortwrapperobject *)x)->key;
1987	yy = ((sortwrapperobject *)y)->key;
1988	return PyObject_CallFunctionObjArgs(co->func, xx, yy, NULL);
1989}
1990
1991PyDoc_STRVAR(cmpwrapper_doc, "cmp() wrapper for sort with custom keys.");
1992
1993static PyTypeObject cmpwrapper_type = {
1994	PyVarObject_HEAD_INIT(&PyType_Type, 0)
1995	"cmpwrapper",				/* tp_name */
1996	sizeof(cmpwrapperobject),		/* tp_basicsize */
1997	0,					/* tp_itemsize */
1998	/* methods */
1999	(destructor)cmpwrapper_dealloc,		/* tp_dealloc */
2000	0,					/* tp_print */
2001	0,					/* tp_getattr */
2002	0,					/* tp_setattr */
2003	0,					/* tp_compare */
2004	0,					/* tp_repr */
2005	0,					/* tp_as_number */
2006	0,					/* tp_as_sequence */
2007	0,					/* tp_as_mapping */
2008	0,					/* tp_hash */
2009	(ternaryfunc)cmpwrapper_call,		/* tp_call */
2010	0,					/* tp_str */
2011	PyObject_GenericGetAttr,		/* tp_getattro */
2012	0,					/* tp_setattro */
2013	0,					/* tp_as_buffer */
2014	Py_TPFLAGS_DEFAULT,			/* tp_flags */
2015	cmpwrapper_doc,				/* tp_doc */
2016};
2017
2018static PyObject *
2019build_cmpwrapper(PyObject *cmpfunc)
2020{
2021	cmpwrapperobject *co;
2022
2023	co = PyObject_New(cmpwrapperobject, &cmpwrapper_type);
2024	if (co == NULL)
2025		return NULL;
2026	Py_INCREF(cmpfunc);
2027	co->func = cmpfunc;
2028	return (PyObject *)co;
2029}
2030
2031/* An adaptive, stable, natural mergesort.  See listsort.txt.
2032 * Returns Py_None on success, NULL on error.  Even in case of error, the
2033 * list will be some permutation of its input state (nothing is lost or
2034 * duplicated).
2035 */
2036static PyObject *
2037listsort(PyListObject *self, PyObject *args, PyObject *kwds)
2038{
2039	MergeState ms;
2040	PyObject **lo, **hi;
2041	Py_ssize_t nremaining;
2042	Py_ssize_t minrun;
2043	Py_ssize_t saved_ob_size, saved_allocated;
2044	PyObject **saved_ob_item;
2045	PyObject **final_ob_item;
2046	PyObject *compare = NULL;
2047	PyObject *result = NULL;	/* guilty until proved innocent */
2048	int reverse = 0;
2049	PyObject *keyfunc = NULL;
2050	Py_ssize_t i;
2051	PyObject *key, *value, *kvpair;
2052	static char *kwlist[] = {"cmp", "key", "reverse", 0};
2053
2054	assert(self != NULL);
2055	assert (PyList_Check(self));
2056	if (args != NULL) {
2057		if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOi:sort",
2058			kwlist, &compare, &keyfunc, &reverse))
2059			return NULL;
2060	}
2061	if (compare == Py_None)
2062		compare = NULL;
2063	if (compare != NULL && 
2064	    PyErr_WarnPy3k("the cmp argument is not supported in 3.x", 1) < 0)
2065		return NULL;
2066	if (keyfunc == Py_None)
2067		keyfunc = NULL;
2068	if (compare != NULL && keyfunc != NULL) {
2069		compare = build_cmpwrapper(compare);
2070		if (compare == NULL)
2071			return NULL;
2072	} else
2073		Py_XINCREF(compare);
2074
2075	/* The list is temporarily made empty, so that mutations performed
2076	 * by comparison functions can't affect the slice of memory we're
2077	 * sorting (allowing mutations during sorting is a core-dump
2078	 * factory, since ob_item may change).
2079	 */
2080	saved_ob_size = Py_SIZE(self);
2081	s

(The listing is truncated at this point; the remainder of listsort() and the rest of the file are not shown.)