PageRenderTime 80ms CodeModel.GetById 14ms RepoModel.GetById 0ms app.codeStats 2ms

/drivers/sqlite-wp7/sqlite/btree_c.cs

https://bitbucket.org/digitalizarte/coolstorage
C# | 9518 lines | 6172 code | 602 blank | 2744 comment | 1863 complexity | 577f6e1cb79f3f8dfb87f580fc3d20fd MD5 | raw file

Large files are truncated, but you can click here to view the full file

  1. using System;
  2. using System.Diagnostics;
  3. using System.Text;
  4. using i64 = System.Int64;
  5. using u8 = System.Byte;
  6. using u16 = System.UInt16;
  7. using u32 = System.UInt32;
  8. using u64 = System.UInt64;
  9. using sqlite3_int64 = System.Int64;
  10. using Pgno = System.UInt32;
  11. namespace Community.CsharpSqlite
  12. {
  13. using DbPage = Sqlite3.PgHdr;
  14. public partial class Sqlite3
  15. {
  16. /*
  17. ** 2004 April 6
  18. **
  19. ** The author disclaims copyright to this source code. In place of
  20. ** a legal notice, here is a blessing:
  21. **
  22. ** May you do good and not evil.
  23. ** May you find forgiveness for yourself and forgive others.
  24. ** May you share freely, never taking more than you give.
  25. **
  26. ** This file implements a external (disk-based) database using BTrees.
  27. ** See the header comment on "btreeInt.h" for additional information.
  28. ** Including a description of file format and an overview of operation.
  29. *************************************************************************
  30. ** Included in SQLite3 port to C#-SQLite; 2008 Noah B Hart
  31. ** C#-SQLite is an independent reimplementation of the SQLite software library
  32. **
  33. ** SQLITE_SOURCE_ID: 2011-01-28 17:03:50 ed759d5a9edb3bba5f48f243df47be29e3fe8cd7
  34. **
  35. *************************************************************************
  36. */
  37. //#include "btreeInt.h"
/*
** The header string that appears at the beginning of every
** SQLite database.
**
** Cached once as the UTF-8 bytes of SQLITE_FILE_HEADER; presumably
** compared against the start of page 1 when a database file is
** opened -- confirm at the call sites.
*/
static byte[] zMagicHeader = Encoding.UTF8.GetBytes( SQLITE_FILE_HEADER );
/*
** Set this global variable to 1 to enable tracing using the TRACE
** macro.
*/
#if TRACE
static bool sqlite3BtreeTrace=false; /* True to enable tracing */
//# define TRACE(X) if(sqlite3BtreeTrace){printf X;fflush(stdout);}
/* Trace-enabled build: print a formatted message when the
** sqlite3BtreeTrace flag is set at runtime. */
static void TRACE(string X, params object[] ap) { if (sqlite3BtreeTrace) printf(X, ap); }
#else
//# define TRACE(X)
/* Tracing disabled: TRACE compiles to an empty method so that call
** sites need no conditional guards of their own. */
static void TRACE( string X, params object[] ap )
{
}
#endif
  57. /*
  58. ** Extract a 2-byte big-endian integer from an array of unsigned bytes.
  59. ** But if the value is zero, make it 65536.
  60. **
  61. ** This routine is used to extract the "offset to cell content area" value
  62. ** from the header of a btree page. If the page size is 65536 and the page
  63. ** is empty, the offset should be 65536, but the 2-byte value stores zero.
  64. ** This routine makes the necessary adjustment to 65536.
  65. */
  66. //#define get2byteNotZero(X) (((((int)get2byte(X))-1)&0xffff)+1)
  67. static int get2byteNotZero( byte[] X, int offset )
  68. {
  69. return ( ( ( ( (int)get2byte( X, offset ) ) - 1 ) & 0xffff ) + 1 );
  70. }
#if !SQLITE_OMIT_SHARED_CACHE
/*
** A list of BtShared objects that are eligible for participation
** in shared cache. This variable has file scope during normal builds,
** but the test harness needs to access it so we make it global for
** test builds.
**
** Access to this variable is protected by SQLITE_MUTEX_STATIC_MASTER.
**
** NOTE(review): the declarations below retain C pointer syntax
** (BtShared *, SQLITE_WSD) and will not compile as C#.  They are only
** included when SQLITE_OMIT_SHARED_CACHE is NOT defined, so this port
** appears to rely on that symbol being defined by the build -- confirm
** the project configuration before attempting to enable shared cache.
*/
#if SQLITE_TEST
BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
#else
static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
#endif
#endif //* SQLITE_OMIT_SHARED_CACHE */
#if !SQLITE_OMIT_SHARED_CACHE
/*
** Enable or disable the shared pager and schema features.
**
** This routine has no effect on existing database connections.
** The shared cache setting effects only future calls to
** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2().
**
** Records the requested setting in the global configuration and always
** returns SQLITE_OK.  Only compiled when shared-cache support is
** enabled (see NOTE in the sqlite3SharedCacheList section: this region
** appears to be excluded from the normal build).
*/
int sqlite3_enable_shared_cache(int enable){
  sqlite3GlobalConfig.sharedCacheEnabled = enable;
  return SQLITE_OK;
}
#endif
  99. #if SQLITE_OMIT_SHARED_CACHE
  100. /*
  101. ** The functions querySharedCacheTableLock(), setSharedCacheTableLock(),
  102. ** and clearAllSharedCacheTableLocks()
  103. ** manipulate entries in the BtShared.pLock linked list used to store
  104. ** shared-cache table level locks. If the library is compiled with the
  105. ** shared-cache feature disabled, then there is only ever one user
  106. ** of each BtShared structure and so this locking is not necessary.
  107. ** So define the lock related functions as no-ops.
  108. */
  109. //#define querySharedCacheTableLock(a,b,c) SQLITE_OK
  110. static int querySharedCacheTableLock( Btree p, Pgno iTab, u8 eLock )
  111. {
  112. return SQLITE_OK;
  113. }
  114. //#define setSharedCacheTableLock(a,b,c) SQLITE_OK
  115. //#define clearAllSharedCacheTableLocks(a)
  116. static void clearAllSharedCacheTableLocks( Btree a )
  117. {
  118. }
  119. //#define downgradeAllSharedCacheTableLocks(a)
  120. static void downgradeAllSharedCacheTableLocks( Btree a )
  121. {
  122. }
  123. //#define hasSharedCacheTableLock(a,b,c,d) 1
  124. static bool hasSharedCacheTableLock( Btree a, Pgno b, int c, int d )
  125. {
  126. return true;
  127. }
  128. //#define hasReadConflicts(a, b) 0
  129. static bool hasReadConflicts( Btree a, Pgno b )
  130. {
  131. return false;
  132. }
  133. #endif
#if !SQLITE_OMIT_SHARED_CACHE
#if SQLITE_DEBUG
/*
**** This function is only used as part of an assert() statement. ***
**
** Check to see if pBtree holds the required locks to read or write to the
** table with root page iRoot. Return 1 if it does and 0 if not.
**
** For example, when writing to a table with root-page iRoot via
** Btree connection pBtree:
**
** assert( hasSharedCacheTableLock(pBtree, iRoot, 0, WRITE_LOCK) );
**
** When writing to an index that resides in a sharable database, the
** caller should have first obtained a lock specifying the root page of
** the corresponding table. This makes things a bit more complicated,
** as this module treats each table as a separate structure. To determine
** the table corresponding to the index being written, this
** function has to search through the database schema.
**
** Instead of a lock on the table/index rooted at page iRoot, the caller may
** hold a write-lock on the schema table (root page 1). This is also
** acceptable.
**
** NOTE(review): this body is only partially translated from C -- it
** still contains `(Schema *)` / `(Index *)` casts, `pSchema->flags`,
** and C-style truthiness tests (`!pSchema`, `for(...; pLock; ...)`)
** that are not valid C#.  It is dead code unless both
** !SQLITE_OMIT_SHARED_CACHE and SQLITE_DEBUG are defined; finish the
** translation before enabling those symbols.
*/
static int hasSharedCacheTableLock(
  Btree pBtree, /* Handle that must hold lock */
  Pgno iRoot, /* Root page of b-tree */
  int isIndex, /* True if iRoot is the root of an index b-tree */
  int eLockType /* Required lock type (READ_LOCK or WRITE_LOCK) */
){
  Schema pSchema = (Schema *)pBtree.pBt.pSchema;
  Pgno iTab = 0;
  BtLock pLock;
  /* If this database is not shareable, or if the client is reading
  ** and has the read-uncommitted flag set, then no lock is required.
  ** Return true immediately.
  */
  if( (pBtree.sharable==null)
   || (eLockType==READ_LOCK && (pBtree.db.flags & SQLITE_ReadUncommitted))
  ){
    return 1;
  }
  /* If the client is reading or writing an index and the schema is
  ** not loaded, then it is too difficult to actually check to see if
  ** the correct locks are held. So do not bother - just return true.
  ** This case does not come up very often anyhow.
  */
  if( isIndex && (!pSchema || (pSchema->flags&DB_SchemaLoaded)==0) ){
    return 1;
  }
  /* Figure out the root-page that the lock should be held on. For table
  ** b-trees, this is just the root page of the b-tree being read or
  ** written. For index b-trees, it is the root page of the associated
  ** table. */
  if( isIndex ){
    HashElem p;
    for(p=sqliteHashFirst(pSchema.idxHash); p!=null; p=sqliteHashNext(p)){
      Index pIdx = (Index *)sqliteHashData(p);
      if( pIdx.tnum==(int)iRoot ){
        iTab = pIdx.pTable.tnum;
      }
    }
  }else{
    iTab = iRoot;
  }
  /* Search for the required lock. Either a write-lock on root-page iTab, a
  ** write-lock on the schema table, or (if the client is reading) a
  ** read-lock on iTab will suffice. Return 1 if any of these are found. */
  for(pLock=pBtree.pBt.pLock; pLock; pLock=pLock.pNext){
    if( pLock.pBtree==pBtree
     && (pLock.iTable==iTab || (pLock.eLock==WRITE_LOCK && pLock.iTable==1))
     && pLock.eLock>=eLockType
    ){
      return 1;
    }
  }
  /* Failed to find the required lock. */
  return 0;
}
#endif //* SQLITE_DEBUG */
  214. #if SQLITE_DEBUG
  215. /*
  216. ** This function may be used as part of assert() statements only. ****
  217. **
  218. ** Return true if it would be illegal for pBtree to write into the
  219. ** table or index rooted at iRoot because other shared connections are
  220. ** simultaneously reading that same table or index.
  221. **
  222. ** It is illegal for pBtree to write if some other Btree object that
  223. ** shares the same BtShared object is currently reading or writing
  224. ** the iRoot table. Except, if the other Btree object has the
  225. ** read-uncommitted flag set, then it is OK for the other object to
  226. ** have a read cursor.
  227. **
  228. ** For example, before writing to any part of the table or index
  229. ** rooted at page iRoot, one should call:
  230. **
  231. ** assert( !hasReadConflicts(pBtree, iRoot) );
  232. */
  233. static int hasReadConflicts(Btree pBtree, Pgno iRoot){
  234. BtCursor p;
  235. for(p=pBtree.pBt.pCursor; p!=null; p=p.pNext){
  236. if( p.pgnoRoot==iRoot
  237. && p.pBtree!=pBtree
  238. && 0==(p.pBtree.db.flags & SQLITE_ReadUncommitted)
  239. ){
  240. return 1;
  241. }
  242. }
  243. return 0;
  244. }
  245. #endif //* #if SQLITE_DEBUG */
/*
** Query to see if Btree handle p may obtain a lock of type eLock
** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return
** SQLITE_OK if the lock may be obtained (by calling
** setSharedCacheTableLock()), or SQLITE_LOCKED if not.
**
** NOTE(review): this body retains C truthiness idioms
** (`!(p.db.flags&...)`, `!p.sharable`, `for(...; pIter; ...)`) that do
** not compile as C#.  It sits inside !SQLITE_OMIT_SHARED_CACHE, which
** appears never to be active in this build; complete the translation
** before enabling shared cache.
*/
static int querySharedCacheTableLock(Btree p, Pgno iTab, u8 eLock){
  BtShared pBt = p.pBt;
  BtLock pIter;
  Debug.Assert( sqlite3BtreeHoldsMutex(p) );
  Debug.Assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
  Debug.Assert( p.db!=null );
  Debug.Assert( !(p.db.flags&SQLITE_ReadUncommitted)||eLock==WRITE_LOCK||iTab==1 );
  /* If requesting a write-lock, then the Btree must have an open write
  ** transaction on this file. And, obviously, for this to be so there
  ** must be an open write transaction on the file itself.
  */
  Debug.Assert( eLock==READ_LOCK || (p==pBt.pWriter && p.inTrans==TRANS_WRITE) );
  Debug.Assert( eLock==READ_LOCK || pBt.inTransaction==TRANS_WRITE );
  /* This routine is a no-op if the shared-cache is not enabled */
  if( !p.sharable ){
    return SQLITE_OK;
  }
  /* If some other connection is holding an exclusive lock, the
  ** requested lock may not be obtained.
  */
  if( pBt.pWriter!=p && pBt.isExclusive ){
    sqlite3ConnectionBlocked(p.db, pBt.pWriter.db);
    return SQLITE_LOCKED_SHAREDCACHE;
  }
  for(pIter=pBt.pLock; pIter; pIter=pIter.pNext){
    /* The condition (pIter.eLock!=eLock) in the following if(...)
    ** statement is a simplification of:
    **
    ** (eLock==WRITE_LOCK || pIter.eLock==WRITE_LOCK)
    **
    ** since we know that if eLock==WRITE_LOCK, then no other connection
    ** may hold a WRITE_LOCK on any table in this file (since there can
    ** only be a single writer).
    */
    Debug.Assert( pIter.eLock==READ_LOCK || pIter.eLock==WRITE_LOCK );
    Debug.Assert( eLock==READ_LOCK || pIter.pBtree==p || pIter.eLock==READ_LOCK);
    if( pIter.pBtree!=p && pIter.iTable==iTab && pIter.eLock!=eLock ){
      sqlite3ConnectionBlocked(p.db, pIter.pBtree.db);
      if( eLock==WRITE_LOCK ){
        Debug.Assert( p==pBt.pWriter );
        pBt.isPending = 1;
      }
      return SQLITE_LOCKED_SHAREDCACHE;
    }
  }
  return SQLITE_OK;
}
  299. #endif //* !SQLITE_OMIT_SHARED_CACHE */
#if !SQLITE_OMIT_SHARED_CACHE
/*
** Add a lock on the table with root-page iTable to the shared-btree used
** by Btree handle p. Parameter eLock must be either READ_LOCK or
** WRITE_LOCK.
**
** This function assumes the following:
**
** (a) The specified Btree object p is connected to a sharable
** database (one with the BtShared.sharable flag set), and
**
** (b) No other Btree objects hold a lock that conflicts
** with the requested lock (i.e. querySharedCacheTableLock() has
** already been called and returned SQLITE_OK).
**
** SQLITE_OK is returned if the lock is added successfully. SQLITE_NOMEM
** is returned if a malloc attempt fails.
**
** NOTE(review): partially translated from C -- `BtLock pLock = 0;`,
** the `(BtLock *)sqlite3MallocZero(sizeof(BtLock))` allocation and the
** `if( !pLock )` truthiness tests are not valid C#.  Dead code unless
** !SQLITE_OMIT_SHARED_CACHE becomes active; finish the port first.
*/
static int setSharedCacheTableLock(Btree p, Pgno iTable, u8 eLock){
  BtShared pBt = p.pBt;
  BtLock pLock = 0;
  BtLock pIter;
  Debug.Assert( sqlite3BtreeHoldsMutex(p) );
  Debug.Assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
  Debug.Assert( p.db!=null );
  /* A connection with the read-uncommitted flag set will never try to
  ** obtain a read-lock using this function. The only read-lock obtained
  ** by a connection in read-uncommitted mode is on the sqlite_master
  ** table, and that lock is obtained in BtreeBeginTrans(). */
  Debug.Assert( 0==(p.db.flags&SQLITE_ReadUncommitted) || eLock==WRITE_LOCK );
  /* This function should only be called on a sharable b-tree after it
  ** has been determined that no other b-tree holds a conflicting lock. */
  Debug.Assert( p.sharable );
  Debug.Assert( SQLITE_OK==querySharedCacheTableLock(p, iTable, eLock) );
  /* First search the list for an existing lock on this table. */
  for(pIter=pBt.pLock; pIter; pIter=pIter.pNext){
    if( pIter.iTable==iTable && pIter.pBtree==p ){
      pLock = pIter;
      break;
    }
  }
  /* If the above search did not find a BtLock struct associating Btree p
  ** with table iTable, allocate one and link it into the list.
  */
  if( !pLock ){
    pLock = (BtLock *)sqlite3MallocZero(sizeof(BtLock));
    if( !pLock ){
      return SQLITE_NOMEM;
    }
    pLock.iTable = iTable;
    pLock.pBtree = p;
    pLock.pNext = pBt.pLock;
    pBt.pLock = pLock;
  }
  /* Set the BtLock.eLock variable to the maximum of the current lock
  ** and the requested lock. This means if a write-lock was already held
  ** and a read-lock requested, we don't incorrectly downgrade the lock.
  */
  Debug.Assert( WRITE_LOCK>READ_LOCK );
  if( eLock>pLock.eLock ){
    pLock.eLock = eLock;
  }
  return SQLITE_OK;
}
#endif //* !SQLITE_OMIT_SHARED_CACHE */
#if !SQLITE_OMIT_SHARED_CACHE
/*
** Release all the table locks (locks obtained via calls to
** the setSharedCacheTableLock() procedure) held by Btree object p.
**
** This function assumes that Btree p has an open read or write
** transaction. If it does not, then the BtShared.isPending variable
** may be incorrectly cleared.
**
** NOTE(review): the C pointer-to-pointer list-unlink idiom
** (`BtLock **ppIter`, `*ppIter`, `&pLock.pNext`) was never translated
** to C# here and will not compile.  Dead code unless
** !SQLITE_OMIT_SHARED_CACHE becomes active.
*/
static void clearAllSharedCacheTableLocks(Btree p){
  BtShared pBt = p.pBt;
  BtLock **ppIter = &pBt.pLock;
  Debug.Assert( sqlite3BtreeHoldsMutex(p) );
  Debug.Assert( p.sharable || 0==*ppIter );
  Debug.Assert( p.inTrans>0 );
  while( ppIter ){
    BtLock pLock = ppIter;
    Debug.Assert( pBt.isExclusive==null || pBt.pWriter==pLock.pBtree );
    Debug.Assert( pLock.pBtree.inTrans>=pLock.eLock );
    if( pLock.pBtree==p ){
      ppIter = pLock.pNext;
      Debug.Assert( pLock.iTable!=1 || pLock==&p.lock );
      if( pLock.iTable!=1 ){
        pLock=null;//sqlite3_free(ref pLock);
      }
    }else{
      ppIter = &pLock.pNext;
    }
  }
  Debug.Assert( pBt.isPending==null || pBt.pWriter );
  if( pBt.pWriter==p ){
    pBt.pWriter = 0;
    pBt.isExclusive = 0;
    pBt.isPending = 0;
  }else if( pBt.nTransaction==2 ){
    /* This function is called when Btree p is concluding its
    ** transaction. If there currently exists a writer, and p is not
    ** that writer, then the number of locks held by connections other
    ** than the writer must be about to drop to zero. In this case
    ** set the isPending flag to 0.
    **
    ** If there is not currently a writer, then BtShared.isPending must
    ** be zero already. So this next line is harmless in that case.
    */
    pBt.isPending = 0;
  }
}
/*
** This function changes all write-locks held by Btree p into read-locks.
**
** NOTE(review): same untranslated C truthiness in the for-loop below.
*/
static void downgradeAllSharedCacheTableLocks(Btree p){
  BtShared pBt = p.pBt;
  if( pBt.pWriter==p ){
    BtLock pLock;
    pBt.pWriter = 0;
    pBt.isExclusive = 0;
    pBt.isPending = 0;
    for(pLock=pBt.pLock; pLock; pLock=pLock.pNext){
      Debug.Assert( pLock.eLock==READ_LOCK || pLock.pBtree==p );
      pLock.eLock = READ_LOCK;
    }
  }
}
#endif //* SQLITE_OMIT_SHARED_CACHE */
  429. //static void releasePage(MemPage pPage); /* Forward reference */
  430. /*
  431. ***** This routine is used inside of assert() only ****
  432. **
  433. ** Verify that the cursor holds the mutex on its BtShared
  434. */
  435. #if SQLITE_DEBUG
  436. static bool cursorHoldsMutex( BtCursor p )
  437. {
  438. return sqlite3_mutex_held( p.pBt.mutex );
  439. }
  440. #else
  441. static bool cursorHoldsMutex(BtCursor p) { return true; }
  442. #endif
  443. #if !SQLITE_OMIT_INCRBLOB
  444. /*
  445. ** Invalidate the overflow page-list cache for cursor pCur, if any.
  446. */
  447. static void invalidateOverflowCache(BtCursor pCur){
  448. Debug.Assert( cursorHoldsMutex(pCur) );
  449. //sqlite3_free(ref pCur.aOverflow);
  450. pCur.aOverflow = null;
  451. }
  452. /*
  453. ** Invalidate the overflow page-list cache for all cursors opened
  454. ** on the shared btree structure pBt.
  455. */
  456. static void invalidateAllOverflowCache(BtShared pBt){
  457. BtCursor p;
  458. Debug.Assert( sqlite3_mutex_held(pBt.mutex) );
  459. for(p=pBt.pCursor; p!=null; p=p.pNext){
  460. invalidateOverflowCache(p);
  461. }
  462. }
  463. /*
  464. ** This function is called before modifying the contents of a table
  465. ** to invalidate any incrblob cursors that are open on the
  466. ** row or one of the rows being modified.
  467. **
  468. ** If argument isClearTable is true, then the entire contents of the
  469. ** table is about to be deleted. In this case invalidate all incrblob
  470. ** cursors open on any row within the table with root-page pgnoRoot.
  471. **
  472. ** Otherwise, if argument isClearTable is false, then the row with
  473. ** rowid iRow is being replaced or deleted. In this case invalidate
  474. ** only those incrblob cursors open on that specific row.
  475. */
  476. static void invalidateIncrblobCursors(
  477. Btree pBtree, /* The database file to check */
  478. i64 iRow, /* The rowid that might be changing */
  479. int isClearTable /* True if all rows are being deleted */
  480. ){
  481. BtCursor p;
  482. BtShared pBt = pBtree.pBt;
  483. Debug.Assert( sqlite3BtreeHoldsMutex(pBtree) );
  484. for(p=pBt.pCursor; p!=null; p=p.pNext){
  485. if( p.isIncrblobHandle && (isClearTable || p.info.nKey==iRow) ){
  486. p.eState = CURSOR_INVALID;
  487. }
  488. }
  489. }
#else
/* Stub functions when INCRBLOB is omitted */
//#define invalidateOverflowCache(x)
/* No-op: without incremental-blob support there is no overflow
** page-list cache to invalidate. */
static void invalidateOverflowCache( BtCursor pCur )
{
}
//#define invalidateAllOverflowCache(x)
/* No-op: see invalidateOverflowCache() above. */
static void invalidateAllOverflowCache( BtShared pBt )
{
}
//#define invalidateIncrblobCursors(x,y,z)
/* No-op: no incrblob cursors can exist in this configuration. */
static void invalidateIncrblobCursors( Btree x, i64 y, int z )
{
}
#endif //* SQLITE_OMIT_INCRBLOB */
  505. /*
  506. ** Set bit pgno of the BtShared.pHasContent bitvec. This is called
  507. ** when a page that previously contained data becomes a free-list leaf
  508. ** page.
  509. **
  510. ** The BtShared.pHasContent bitvec exists to work around an obscure
  511. ** bug caused by the interaction of two useful IO optimizations surrounding
  512. ** free-list leaf pages:
  513. **
  514. ** 1) When all data is deleted from a page and the page becomes
  515. ** a free-list leaf page, the page is not written to the database
  516. ** (as free-list leaf pages contain no meaningful data). Sometimes
  517. ** such a page is not even journalled (as it will not be modified,
  518. ** why bother journalling it?).
  519. **
  520. ** 2) When a free-list leaf page is reused, its content is not read
  521. ** from the database or written to the journal file (why should it
  522. ** be, if it is not at all meaningful?).
  523. **
  524. ** By themselves, these optimizations work fine and provide a handy
  525. ** performance boost to bulk delete or insert operations. However, if
  526. ** a page is moved to the free-list and then reused within the same
  527. ** transaction, a problem comes up. If the page is not journalled when
  528. ** it is moved to the free-list and it is also not journalled when it
  529. ** is extracted from the free-list and reused, then the original data
  530. ** may be lost. In the event of a rollback, it may not be possible
  531. ** to restore the database to its original configuration.
  532. **
  533. ** The solution is the BtShared.pHasContent bitvec. Whenever a page is
  534. ** moved to become a free-list leaf page, the corresponding bit is
  535. ** set in the bitvec. Whenever a leaf page is extracted from the free-list,
  536. ** optimization 2 above is omitted if the corresponding bit is already
  537. ** set in BtShared.pHasContent. The contents of the bitvec are cleared
  538. ** at the end of every transaction.
  539. */
  540. static int btreeSetHasContent( BtShared pBt, Pgno pgno )
  541. {
  542. int rc = SQLITE_OK;
  543. if ( null == pBt.pHasContent )
  544. {
  545. Debug.Assert( pgno <= pBt.nPage );
  546. pBt.pHasContent = sqlite3BitvecCreate( pBt.nPage );
  547. if ( null == pBt.pHasContent )
  548. {
  549. rc = SQLITE_NOMEM;
  550. }
  551. }
  552. if ( rc == SQLITE_OK && pgno <= sqlite3BitvecSize( pBt.pHasContent ) )
  553. {
  554. rc = sqlite3BitvecSet( pBt.pHasContent, pgno );
  555. }
  556. return rc;
  557. }
  558. /*
  559. ** Query the BtShared.pHasContent vector.
  560. **
  561. ** This function is called when a free-list leaf page is removed from the
  562. ** free-list for reuse. It returns false if it is safe to retrieve the
  563. ** page from the pager layer with the 'no-content' flag set. True otherwise.
  564. */
  565. static bool btreeGetHasContent( BtShared pBt, Pgno pgno )
  566. {
  567. Bitvec p = pBt.pHasContent;
  568. return ( p != null && ( pgno > sqlite3BitvecSize( p ) || sqlite3BitvecTest( p, pgno ) != 0 ) );
  569. }
  570. /*
  571. ** Clear (destroy) the BtShared.pHasContent bitvec. This should be
  572. ** invoked at the conclusion of each write-transaction.
  573. */
  574. static void btreeClearHasContent( BtShared pBt )
  575. {
  576. sqlite3BitvecDestroy( ref pBt.pHasContent );
  577. pBt.pHasContent = null;
  578. }
/*
** Save the current cursor position in the variables BtCursor.nKey
** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK.
**
** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID)
** prior to calling this routine.
**
** Returns SQLITE_OK on success; otherwise the error code from reading
** the key, in which case the cursor is left untouched.
*/
static int saveCursorPosition( BtCursor pCur )
{
  int rc;
  Debug.Assert( CURSOR_VALID == pCur.eState );
  Debug.Assert( null == pCur.pKey );
  Debug.Assert( cursorHoldsMutex( pCur ) );
  rc = sqlite3BtreeKeySize( pCur, ref pCur.nKey );
  Debug.Assert( rc == SQLITE_OK ); /* KeySize() cannot fail */
  /* If this is an intKey table, then the above call to BtreeKeySize()
  ** stores the integer key in pCur.nKey. In this case this value is
  ** all that is required. Otherwise, if pCur is not open on an intKey
  ** table, then malloc space for and store the pCur.nKey bytes of key
  ** data.
  */
  if ( 0 == pCur.apPage[0].intKey )
  {
    byte[] pKey = sqlite3Malloc( (int)pCur.nKey );
    /* The C original's allocation-failure path is retained below as
    ** commented-out code; in this port sqlite3Malloc is assumed not to
    ** return null (allocation failure throws instead) -- confirm. */
    //if( pKey !=null){
    rc = sqlite3BtreeKey( pCur, 0, (u32)pCur.nKey, pKey );
    if ( rc == SQLITE_OK )
    {
      pCur.pKey = pKey;
    }
    //else{
    // sqlite3_free(ref pKey);
    //}
    //}else{
    // rc = SQLITE_NOMEM;
    //}
  }
  Debug.Assert( 0 == pCur.apPage[0].intKey || null == pCur.pKey );
  if ( rc == SQLITE_OK )
  {
    /* Release every page held by the cursor, root to leaf, and mark
    ** the cursor as needing a re-seek before it is used again. */
    int i;
    for ( i = 0; i <= pCur.iPage; i++ )
    {
      releasePage( pCur.apPage[i] );
      pCur.apPage[i] = null;
    }
    pCur.iPage = -1;
    pCur.eState = CURSOR_REQUIRESEEK;
  }
  invalidateOverflowCache( pCur );
  return rc;
}
  631. /*
  632. ** Save the positions of all cursors (except pExcept) that are open on
  633. ** the table with root-page iRoot. Usually, this is called just before cursor
  634. ** pExcept is used to modify the table (BtreeDelete() or BtreeInsert()).
  635. */
  636. static int saveAllCursors( BtShared pBt, Pgno iRoot, BtCursor pExcept )
  637. {
  638. BtCursor p;
  639. Debug.Assert( sqlite3_mutex_held( pBt.mutex ) );
  640. Debug.Assert( pExcept == null || pExcept.pBt == pBt );
  641. for ( p = pBt.pCursor; p != null; p = p.pNext )
  642. {
  643. if ( p != pExcept && ( 0 == iRoot || p.pgnoRoot == iRoot ) &&
  644. p.eState == CURSOR_VALID )
  645. {
  646. int rc = saveCursorPosition( p );
  647. if ( SQLITE_OK != rc )
  648. {
  649. return rc;
  650. }
  651. }
  652. }
  653. return SQLITE_OK;
  654. }
  655. /*
  656. ** Clear the current cursor position.
  657. */
  658. static void sqlite3BtreeClearCursor( BtCursor pCur )
  659. {
  660. Debug.Assert( cursorHoldsMutex( pCur ) );
  661. sqlite3_free( ref pCur.pKey );
  662. pCur.eState = CURSOR_INVALID;
  663. }
/*
** In this version of BtreeMoveto, pKey is a packed index record
** such as is generated by the OP_MakeRecord opcode. Unpack the
** record and then call BtreeMovetoUnpacked() to do the work.
*/
static int btreeMoveto(
BtCursor pCur, /* Cursor open on the btree to be searched */
byte[] pKey, /* Packed key if the btree is an index */
i64 nKey, /* Integer key for tables. Size of pKey for indices */
int bias, /* Bias search to the high end */
ref int pRes /* Write search results here */
)
{
  int rc; /* Status code */
  UnpackedRecord pIdxKey; /* Unpacked index key */
  /* Pre-allocated scratch record, mirroring the C `char aSpace[150]`
  ** stack buffer that avoids a malloc for small keys. */
  UnpackedRecord aSpace = new UnpackedRecord();//char aSpace[150]; /* Temp space for pIdxKey - to avoid a malloc */
  if ( pKey != null )
  {
    Debug.Assert( nKey == (i64)(int)nKey );
    /* NOTE(review): 16 stands in for sizeof(aSpace) from the C original;
    ** its meaning here depends on sqlite3VdbeRecordUnpack -- confirm. */
    pIdxKey = sqlite3VdbeRecordUnpack( pCur.pKeyInfo, (int)nKey, pKey,
    aSpace, 16 );//sizeof( aSpace ) );
    if ( pIdxKey == null )
      return SQLITE_NOMEM;
  }
  else
  {
    /* Table (intKey) btree: nKey alone identifies the row. */
    pIdxKey = null;
  }
  rc = sqlite3BtreeMovetoUnpacked( pCur, pIdxKey, nKey, bias != 0 ? 1 : 0, ref pRes );
  if ( pKey != null )
  {
    /* Release the unpacked record allocated above. */
    sqlite3VdbeDeleteUnpackedRecord( pIdxKey );
  }
  return rc;
}
/*
** Restore the cursor to the position it was in (or as close to as possible)
** when saveCursorPosition() was called. Note that this call deletes the
** saved position info stored by saveCursorPosition(), so there can be
** at most one effective restoreCursorPosition() call after each
** saveCursorPosition().
**
** On success pCur.eState is CURSOR_VALID or CURSOR_INVALID and the
** saved key is discarded; a faulted cursor returns its stored error
** code unchanged.
*/
static int btreeRestoreCursorPosition( BtCursor pCur )
{
  int rc;
  Debug.Assert( cursorHoldsMutex( pCur ) );
  Debug.Assert( pCur.eState >= CURSOR_REQUIRESEEK );
  if ( pCur.eState == CURSOR_FAULT )
  {
    /* A previous error is latched in skipNext; report it again. */
    return pCur.skipNext;
  }
  pCur.eState = CURSOR_INVALID;
  /* Re-seek to the saved key; the comparison result lands in skipNext. */
  rc = btreeMoveto( pCur, pCur.pKey, pCur.nKey, 0, ref pCur.skipNext );
  if ( rc == SQLITE_OK )
  {
    //sqlite3_free(ref pCur.pKey);
    pCur.pKey = null;
    Debug.Assert( pCur.eState == CURSOR_VALID || pCur.eState == CURSOR_INVALID );
  }
  return rc;
}
  725. //#define restoreCursorPosition(p) \
  726. // (p.eState>=CURSOR_REQUIRESEEK ? \
  727. // btreeRestoreCursorPosition(p) : \
  728. // SQLITE_OK)
  729. static int restoreCursorPosition( BtCursor pCur )
  730. {
  731. if ( pCur.eState >= CURSOR_REQUIRESEEK )
  732. return btreeRestoreCursorPosition( pCur );
  733. else
  734. return SQLITE_OK;
  735. }
  736. /*
  737. ** Determine whether or not a cursor has moved from the position it
  738. ** was last placed at. Cursors can move when the row they are pointing
  739. ** at is deleted out from under them.
  740. **
  741. ** This routine returns an error code if something goes wrong. The
  742. ** integer pHasMoved is set to one if the cursor has moved and 0 if not.
  743. */
  744. static int sqlite3BtreeCursorHasMoved( BtCursor pCur, ref int pHasMoved )
  745. {
  746. int rc;
  747. rc = restoreCursorPosition( pCur );
  748. if ( rc != 0 )
  749. {
  750. pHasMoved = 1;
  751. return rc;
  752. }
  753. if ( pCur.eState != CURSOR_VALID || pCur.skipNext != 0 )
  754. {
  755. pHasMoved = 1;
  756. }
  757. else
  758. {
  759. pHasMoved = 0;
  760. }
  761. return SQLITE_OK;
  762. }
  763. #if !SQLITE_OMIT_AUTOVACUUM
  764. /*
  765. ** Given a page number of a regular database page, return the page
  766. ** number for the pointer-map page that contains the entry for the
  767. ** input page number.
  768. **
  769. ** Return 0 (not a valid page) for pgno==1 since there is
  770. ** no pointer map associated with page 1. The integrity_check logic
  771. ** requires that ptrmapPageno(*,1)!=1.
  772. */
  773. static Pgno ptrmapPageno( BtShared pBt, Pgno pgno )
  774. {
  775. int nPagesPerMapPage;
  776. Pgno iPtrMap, ret;
  777. Debug.Assert( sqlite3_mutex_held( pBt.mutex ) );
  778. if ( pgno < 2 )
  779. return 0;
  780. nPagesPerMapPage = (int)( pBt.usableSize / 5 + 1 );
  781. iPtrMap = (Pgno)( ( pgno - 2 ) / nPagesPerMapPage );
  782. ret = (Pgno)( iPtrMap * nPagesPerMapPage ) + 2;
  783. if ( ret == PENDING_BYTE_PAGE( pBt ) )
  784. {
  785. ret++;
  786. }
  787. return ret;
  788. }
/*
** Write an entry into the pointer map.
**
** This routine updates the pointer map entry for page number 'key'
** so that it maps to type 'eType' and parent page number 'pgno'.
**
** If pRC is initially non-zero (non-SQLITE_OK) then this routine is
** a no-op. If an error occurs, the appropriate error code is written
** into pRC.
*/
static void ptrmapPut( BtShared pBt, Pgno key, u8 eType, Pgno parent, ref int pRC )
{
  PgHdr pDbPage = new PgHdr(); /* The pointer map page */
  u8[] pPtrmap; /* The pointer map data */
  Pgno iPtrmap; /* The pointer map page number */
  int offset; /* Offset in pointer map page */
  int rc; /* Return code from subfunctions */
  /* Error-in, error-out: do nothing if a prior step already failed. */
  if ( pRC != 0 )
    return;
  Debug.Assert( sqlite3_mutex_held( pBt.mutex ) );
  /* The master-journal page number must never be used as a pointer map page */
  Debug.Assert( false == PTRMAP_ISPAGE( pBt, PENDING_BYTE_PAGE( pBt ) ) );
  Debug.Assert( pBt.autoVacuum );
  if ( key == 0 )
  {
    /* Page 0 does not exist; a zero key indicates corruption. */
    pRC = SQLITE_CORRUPT_BKPT();
    return;
  }
  iPtrmap = PTRMAP_PAGENO( pBt, key );
  rc = sqlite3PagerGet( pBt.pPager, iPtrmap, ref pDbPage );
  if ( rc != SQLITE_OK )
  {
    pRC = rc;
    return;
  }
  offset = (int)PTRMAP_PTROFFSET( iPtrmap, key );
  if ( offset < 0 )
  {
    pRC = SQLITE_CORRUPT_BKPT();
    /* Page is already referenced; release it via the common exit. */
    goto ptrmap_exit;
  }
  pPtrmap = sqlite3PagerGetData( pDbPage );
  /* Only write (and journal) the page if the entry actually changes. */
  if ( eType != pPtrmap[offset] || sqlite3Get4byte( pPtrmap, offset + 1 ) != parent )
  {
    TRACE( "PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent );
    pRC = rc = sqlite3PagerWrite( pDbPage );
    if ( rc == SQLITE_OK )
    {
      pPtrmap[offset] = eType;
      sqlite3Put4byte( pPtrmap, offset + 1, parent );
    }
  }
ptrmap_exit:
  sqlite3PagerUnref( pDbPage );
}
  844. /*
  845. ** Read an entry from the pointer map.
  846. **
  847. ** This routine retrieves the pointer map entry for page 'key', writing
  848. ** the type and parent page number to pEType and pPgno respectively.
  849. ** An error code is returned if something goes wrong, otherwise SQLITE_OK.
  850. */
static int ptrmapGet( BtShared pBt, Pgno key, ref u8 pEType, ref Pgno pPgno )
{
  PgHdr pDbPage = new PgHdr();/* The pointer map page */
  int iPtrmap; /* Pointer map page index */
  u8[] pPtrmap; /* Pointer map page data */
  int offset; /* Offset of entry in pointer map */
  int rc;
  Debug.Assert( sqlite3_mutex_held( pBt.mutex ) );
  /* Fetch the pointer-map page containing the entry for 'key'. */
  iPtrmap = (int)PTRMAP_PAGENO( pBt, key );
  rc = sqlite3PagerGet( pBt.pPager, (u32)iPtrmap, ref pDbPage );
  if ( rc != 0 )
  {
    return rc;
  }
  pPtrmap = sqlite3PagerGetData( pDbPage );
  /* Entry layout: 1 type byte followed by a 4-byte big-endian parent
  ** page number. */
  offset = (int)PTRMAP_PTROFFSET( (u32)iPtrmap, key );
  // Under C# pEType will always exist. No need to test; //
  //Debug.Assert( pEType != 0 );
  pEType = pPtrmap[offset];
  // Under C# pPgno will always exist. No need to test; //
  //if ( pPgno != 0 )
  pPgno = sqlite3Get4byte( pPtrmap, offset + 1 );
  sqlite3PagerUnref( pDbPage );
  /* Entry types outside the range 1..5 indicate corruption. */
  if ( pEType < 1 || pEType > 5 )
    return SQLITE_CORRUPT_BKPT();
  return SQLITE_OK;
}
  878. #else //* if defined SQLITE_OMIT_AUTOVACUUM */
  879. //#define ptrmapPut(w,x,y,z,rc)
  880. //#define ptrmapGet(w,x,y,z) SQLITE_OK
  881. //#define ptrmapPutOvflPtr(x, y, rc)
  882. #endif
  883. /*
  884. ** Given a btree page and a cell index (0 means the first cell on
  885. ** the page, 1 means the second cell, and so forth) return a pointer
  886. ** to the cell content.
  887. **
  888. ** This routine works only for pages that do not contain overflow cells.
  889. */
  890. //#define findCell(P,I) \
  891. // ((P).aData + ((P).maskPage & get2byte((P).aData[(P).cellOffset+2*(I)])))
  892. static int findCell( MemPage pPage, int iCell )
  893. {
  894. return get2byte( pPage.aData, pPage.cellOffset + 2 * ( iCell ) );
  895. }
  896. /*
  897. ** This a more complex version of findCell() that works for
  898. ** pages that do contain overflow cells.
  899. */
static int findOverflowCell( MemPage pPage, int iCell )
{
  int i;
  Debug.Assert( sqlite3_mutex_held( pPage.pBt.mutex ) );
  /* Walk the overflow-cell array from the end.  Every overflow cell
  ** whose logical index k is at or before the requested index shifts
  ** the position of the remaining ordinary cells by one, so decrement
  ** iCell as each such cell is passed. */
  for ( i = pPage.nOverflow - 1; i >= 0; i-- )
  {
    int k;
    _OvflCell pOvfl;
    pOvfl = pPage.aOvfl[i];
    k = pOvfl.idx;
    if ( k <= iCell )
    {
      if ( k == iCell )
      {
        //return pOvfl.pCell;
        return -i - 1; // Negative Offset means overflow cells
      }
      iCell--;
    }
  }
  /* Not an overflow cell: use the ordinary cell-pointer-array lookup. */
  return findCell( pPage, iCell );
}
  922. /*
  923. ** Parse a cell content block and fill in the CellInfo structure. There
  924. ** are two versions of this function. btreeParseCell() takes a
  925. ** cell index as the second argument and btreeParseCellPtr()
  926. ** takes a pointer to the body of the cell as its second argument.
  927. **
  928. ** Within this file, the parseCell() macro can be called instead of
  929. ** btreeParseCellPtr(). Using some compilers, this will be faster.
  930. */
  931. //OVERLOADS
  932. static void btreeParseCellPtr(
  933. MemPage pPage, /* Page containing the cell */
  934. int iCell, /* Pointer to the cell text. */
  935. ref CellInfo pInfo /* Fill in this structure */
  936. )
  937. {
  938. btreeParseCellPtr( pPage, pPage.aData, iCell, ref pInfo );
  939. }
  940. static void btreeParseCellPtr(
  941. MemPage pPage, /* Page containing the cell */
  942. byte[] pCell, /* The actual data */
  943. ref CellInfo pInfo /* Fill in this structure */
  944. )
  945. {
  946. btreeParseCellPtr( pPage, pCell, 0, ref pInfo );
  947. }
/* Parse the cell starting at pCell[iCell] on pPage and fill in pInfo
** with its header size, key, payload split (local vs. overflow) and
** total on-page size. */
static void btreeParseCellPtr(
MemPage pPage, /* Page containing the cell */
u8[] pCell, /* Pointer to the cell text. */
int iCell, /* Pointer to the cell text. */
ref CellInfo pInfo /* Fill in this structure */
)
{
  u16 n; /* Number bytes in cell content header */
  u32 nPayload = 0; /* Number of bytes of cell payload */
  Debug.Assert( sqlite3_mutex_held( pPage.pBt.mutex ) );
  if ( pInfo.pCell != pCell )
    pInfo.pCell = pCell;
  pInfo.iCell = iCell;
  Debug.Assert( pPage.leaf == 0 || pPage.leaf == 1 );
  /* Interior pages carry a 4-byte child-page pointer ahead of the cell
  ** header; leaf pages do not. */
  n = pPage.childPtrSize;
  Debug.Assert( n == 4 - 4 * pPage.leaf );
  if ( pPage.intKey != 0 )
  {
    /* Table b-tree cell: optional payload-size varint (pages with
    ** data), then the 64-bit rowid as a varint. */
    if ( pPage.hasData != 0 )
    {
      n += (u16)getVarint32( pCell, iCell + n, ref nPayload );
    }
    else
    {
      nPayload = 0;
    }
    n += (u16)getVarint( pCell, iCell + n, ref pInfo.nKey );
    pInfo.nData = nPayload;
  }
  else
  {
    /* Index b-tree cell: the key itself is the payload; no data. */
    pInfo.nData = 0;
    n += (u16)getVarint32( pCell, iCell + n, ref nPayload );
    pInfo.nKey = nPayload;
  }
  pInfo.nPayload = nPayload;
  pInfo.nHeader = n;
  testcase( nPayload == pPage.maxLocal );
  testcase( nPayload == pPage.maxLocal + 1 );
  if ( likely( nPayload <= pPage.maxLocal ) )
  {
    /* This is the (easy) common case where the entire payload fits
    ** on the local page. No overflow is required.
    */
    if ( ( pInfo.nSize = (u16)( n + nPayload ) ) < 4 )
      pInfo.nSize = 4; /* A cell never occupies fewer than 4 bytes. */
    pInfo.nLocal = (u16)nPayload;
    pInfo.iOverflow = 0;
  }
  else
  {
    /* If the payload will not fit completely on the local page, we have
    ** to decide how much to store locally and how much to spill onto
    ** overflow pages. The strategy is to minimize the amount of unused
    ** space on overflow pages while keeping the amount of local storage
    ** in between minLocal and maxLocal.
    **
    ** Warning: changing the way overflow payload is distributed in any
    ** way will result in an incompatible file format.
    */
    int minLocal; /* Minimum amount of payload held locally */
    int maxLocal; /* Maximum amount of payload held locally */
    int surplus; /* Overflow payload available for local storage */
    minLocal = pPage.minLocal;
    maxLocal = pPage.maxLocal;
    surplus = (int)( minLocal + ( nPayload - minLocal ) % ( pPage.pBt.usableSize - 4 ) );
    testcase( surplus == maxLocal );
    testcase( surplus == maxLocal + 1 );
    if ( surplus <= maxLocal )
    {
      pInfo.nLocal = (u16)surplus;
    }
    else
    {
      pInfo.nLocal = (u16)minLocal;
    }
    /* The 4-byte first-overflow-page number follows the local payload. */
    pInfo.iOverflow = (u16)( pInfo.nLocal + n );
    pInfo.nSize = (u16)( pInfo.iOverflow + 4 );
  }
}
  1028. //#define parseCell(pPage, iCell, pInfo) \
  1029. // btreeParseCellPtr((pPage), findCell((pPage), (iCell)), (pInfo))
  1030. static void parseCell( MemPage pPage, int iCell, ref CellInfo pInfo )
  1031. {
  1032. btreeParseCellPtr( pPage, findCell( pPage, iCell ), ref pInfo );
  1033. }
  1034. static void btreeParseCell(
  1035. MemPage pPage, /* Page containing the cell */
  1036. int iCell, /* The cell index. First cell is 0 */
  1037. ref CellInfo pInfo /* Fill in this structure */
  1038. )
  1039. {
  1040. parseCell( pPage, iCell, ref pInfo );
  1041. }
  1042. /*
  1043. ** Compute the total number of bytes that a Cell needs in the cell
  1044. ** data area of the btree-page. The return number includes the cell
  1045. ** data header and the local payload, but not any overflow page or
  1046. ** the space used by the cell pointer.
  1047. */
  1048. // Alternative form for C#
/* Compute the total on-page size of cell iCell.  iCell may be a normal
** offset into pPage.aData, or a negative value (-i-1, as produced by
** findOverflowCell()) naming overflow cell i.  Only the cell's header
** bytes are needed, so at most 13 bytes are copied into a scratch
** buffer before parsing. */
static u16 cellSizePtr( MemPage pPage, int iCell )
{
  CellInfo info = new CellInfo();
  byte[] pCell = new byte[13];
  // Minimum Size = (2 bytes of Header or (4) Child Pointer) + (maximum of) 9 bytes data
  if ( iCell < 0 )// Overflow Cell
    /* Copy from the overflow cell's own buffer, clamped to whichever
    ** of the two buffers is shorter. */
    Buffer.BlockCopy( pPage.aOvfl[-( iCell + 1 )].pCell, 0, pCell, 0, pCell.Length < pPage.aOvfl[-( iCell + 1 )].pCell.Length ? pCell.Length : pPage.aOvfl[-( iCell + 1 )].pCell.Length );
  else if ( iCell >= pPage.aData.Length + 1 - pCell.Length )
    /* Near the end of the page image: copy only the bytes remaining. */
    Buffer.BlockCopy( pPage.aData, iCell, pCell, 0, pPage.aData.Length - iCell );
  else
    Buffer.BlockCopy( pPage.aData, iCell, pCell, 0, pCell.Length );
  btreeParseCellPtr( pPage, pCell, ref info );
  return info.nSize;
}
  1063. // Alternative form for C#
/* Compute the total on-page size of the cell starting at
** pCell[offset].  The cell bytes are first copied into a freshly
** allocated buffer (kept in info.pCell) so parsing can start at
** index 0. */
static u16 cellSizePtr( MemPage pPage, byte[] pCell, int offset )
{
  CellInfo info = new CellInfo();
  info.pCell = sqlite3Malloc( pCell.Length );
  Buffer.BlockCopy( pCell, offset, info.pCell, 0, pCell.Length - offset );
  btreeParseCellPtr( pPage, info.pCell, ref info );
  return info.nSize;
}
/* Fast computation of the total on-page size of the cell in pCell:
** walk only the varint header fields rather than doing a full parse.
** In debug builds the result is cross-checked against
** btreeParseCellPtr(). */
static u16 cellSizePtr( MemPage pPage, u8[] pCell )
{
  int _pIter = pPage.childPtrSize; //u8 pIter = &pCell[pPage.childPtrSize];
  u32 nSize = 0;
#if SQLITE_DEBUG || DEBUG
  /* The value returned by this function should always be the same as
  ** the (CellInfo.nSize) value found by doing a full parse of the
  ** cell. If SQLITE_DEBUG is defined, an Debug.Assert() at the bottom of
  ** this function verifies that this invariant is not violated. */
  CellInfo debuginfo = new CellInfo();
  btreeParseCellPtr( pPage, pCell, ref debuginfo );
#else
  CellInfo debuginfo = new CellInfo();
#endif
  if ( pPage.intKey != 0 )
  {
    int pEnd;
    if ( pPage.hasData != 0 )
    {
      /* Read the payload-size varint.  NOTE(review): this overload reads
      ** from offset 0; on intKey pages with data, childPtrSize is
      ** expected to be 0 so _pIter == 0 here — confirm. */
      _pIter += getVarint32( pCell, ref nSize );// pIter += getVarint32( pIter, ref nSize );
    }
    else
    {
      nSize = 0;
    }
    /* pIter now points at the 64-bit integer key value, a variable length
    ** integer. The following block moves pIter to point at the first byte
    ** past the end of the key value. */
    pEnd = _pIter + 9;//pEnd = &pIter[9];
    while ( ( ( pCell[_pIter++] ) & 0x80 ) != 0 && _pIter < pEnd )
      ;//while( (pIter++)&0x80 && pIter<pEnd );
  }
  else
  {
    /* Index page: a single varint gives the key/payload size. */
    _pIter += getVarint32( pCell, _pIter, ref nSize ); //pIter += getVarint32( pIter, ref nSize );
  }
  testcase( nSize == pPage.maxLocal );
  testcase( nSize == pPage.maxLocal + 1 );
  if ( nSize > pPage.maxLocal )
  {
    /* Payload spills to overflow pages: compute the local portion and
    ** add 4 bytes for the first overflow page number. */
    int minLocal = pPage.minLocal;
    nSize = (u32)( minLocal + ( nSize - minLocal ) % ( pPage.pBt.usableSize - 4 ) );
    testcase( nSize == pPage.maxLocal );
    testcase( nSize == pPage.maxLocal + 1 );
    if ( nSize > pPage.maxLocal )
    {
      nSize = (u32)minLocal;
    }
    nSize += 4;
  }
  nSize += (uint)_pIter;//nSize += (u32)(pIter - pCell);
  /* The minimum size of any cell is 4 bytes. */
  if ( nSize < 4 )
  {
    nSize = 4;
  }
  Debug.Assert( nSize == debuginfo.nSize );
  return (u16)nSize;
}
#if SQLITE_DEBUG
/* This variation on cellSizePtr() is used inside of assert() statements
** only. */
static u16 cellSize( MemPage pPage, int iCell )
{
  /* Resolve the cell's offset, then compute its on-page size. */
  return cellSizePtr( pPage, findCell( pPage, iCell ) );
}
#else
/* Non-debug builds: stub that returns a sentinel; never used for real
** size computation. */
static int cellSize(MemPage pPage, int iCell) { return -1; }
#endif
  1141. #if !SQLITE_OMIT_AUTOVACUUM
  1142. /*
  1143. ** If the cell pCell, part of page pPage contains a pointer
  1144. ** to an overflow page, insert an entry into the pointer-map
  1145. ** for the overflow page.
  1146. */
/* If the cell at offset pCell within pPage points to an overflow page,
** record that overflow page in the pointer map.  No-op when pRC already
** holds an error; errors from ptrmapPut() are reported through pRC. */
static void ptrmapPutOvflPtr( MemPage pPage, int pCell, ref int pRC )
{
  if ( pRC != 0 )
    return;
  CellInfo info = new CellInfo();
  Debug.Assert( pCell != 0 );
  btreeParseCellPtr( pPage, pCell, ref info );
  Debug.Assert( ( info.nData + ( pPage.intKey != 0 ? 0 : info.nKey ) ) == info.nPayload );
  if ( info.iOverflow != 0 )
  {
    /* The first overflow page number is the 4 bytes just past the
    ** local payload (at info.iOverflow within the cell). */
    Pgno ovfl = sqlite3Get4byte( pPage.aData, pCell, info.iOverflow );
    ptrmapPut( pPage.pBt, ovfl, PTRMAP_OVERFLOW1, pPage.pgno, ref pRC );
  }
}
/* Buffer overload: same as ptrmapPutOvflPtr(MemPage,int,ref int) but
** the cell content is supplied directly as a byte array starting at
** index 0. */
static void ptrmapPutOvflPtr( MemPage pPage, u8[] pCell, ref int pRC )
{
  if ( pRC != 0 )
    return;
  CellInfo info = new CellInfo();
  Debug.Assert( pCell != null );
  btreeParseCellPtr( pPage, pCell, ref info );
  Debug.Assert( ( info.nData + ( pPage.intKey != 0 ? 0 : info.nKey ) ) == info.nPayload );
  if ( info.iOverflow != 0 )
  {
    /* The first overflow page number follows the local payload. */
    Pgno ovfl = sqlite3Get4byte( pCell, info.iOverflow );
    ptrmapPut( pPage.pBt, ovfl, PTRMAP_OVERFLOW1, pPage.pgno, ref pRC );
  }
}
  1175. #endif
  1176. /*
  1177. ** Defragment the page given. All Cells are moved to the
  1178. ** end of the page and all free space is collected into one
  1179. ** big FreeBlk that occurs in between the header and cell
  1180. ** pointer array and the cell content area.
  1181. */
static int defragmentPage( MemPage pPage )
{
  int i; /* Loop counter */
  int pc; /* Address of a i-th cell */
  int addr; /* Offset of first byte after cell pointer array */
  int hdr; /* Offset to the page header */
  int size; /* Size of a cell */
  int usableSize; /* Number of usable bytes on a page */
  int cellOffset; /* Offset to the cell pointer array */
  int cbrk; /* Offset to the cell content area */
  int nCell; /* Number of cells on the page */
  byte[] data; /* The page data */
  byte[] temp; /* Temp area for cell content */
  int iCellFirst; /* First allowable cell index */
  int iCellLast; /* Last possible cell index */
  Debug.Assert( sqlite3PagerIswriteable( pPage.pDbPage ) );
  Debug.Assert( pPage.pBt != null );
  Debug.Assert( pPage.pBt.usableSize <= SQLITE_MAX_PAGE_SIZE );
  Debug.Assert( pPage.nOverflow == 0 );
  Debug.Assert( sqlite3_mutex_held( pPage.pBt.mutex ) );
  temp = sqlite3PagerTempSpace( pPage.pBt.pPager );
  data = pPage.aData;
  hdr = pPage.hdrOffset;
  cellOffset = pPage.cellOffset;
  nCell = pPage.nCell;
  Debug.Assert( nCell == get2byte( data, hdr + 3 ) );
  usableSize = (int)pPage.pBt.usableSize;
  /* Snapshot the current cell content area into 'temp', then rebuild it
  ** in 'data' packed tightly against the end of the page. */
  cbrk = get2byte( data, hdr + 5 );
  Buffer.BlockCopy( data, cbrk, temp, cbrk, usableSize - cbrk );//memcpy( temp[cbrk], ref data[cbrk], usableSize - cbrk );
  cbrk = usableSize;
  iCellFirst = cellOffset + 2 * nCell;
  iCellLast = usableSize - 4;
  for ( i = 0; i < nCell; i++ )
  {
    int pAddr; /* The i-th cell pointer */
    pAddr = cellOffset + i * 2; // &data[cellOffset + i * 2];
    pc = get2byte( data, pAddr );
    testcase( pc == iCellFirst );
    testcase( pc == iCellLast );
#if !(SQLITE_ENABLE_OVERSIZE_CELL_CHECK)
    /* These conditions have already been verified in btreeInitPage()
    ** if SQLITE_ENABLE_OVERSIZE_CELL_CHECK is defined
    */
    if ( pc < iCellFirst || pc > iCellLast )
    {
      return SQLITE_CORRUPT_BKPT();
    }
#endif
    Debug.Assert( pc >= iCellFirst && pc <= iCellLast );
    /* Move this cell (read from the snapshot) to the new, lower cbrk
    ** and update its cell-pointer entry to match. */
    size = cellSizePtr( pPage, temp, pc );
    cbrk -= size;
#if (SQLITE_ENABLE_OVERSIZE_CELL_CHECK)
    if ( cbrk < iCellFirst )
    {
      return SQLITE_CORRUPT_BKPT();
    }
#else
    if ( cbrk < iCellFirst || pc + size > usableSize )
    {
      return SQLITE_CORRUPT_BKPT();
    }
#endif
    Debug.Assert( cbrk + size <= usableSize && cbrk >= iCellFirst );
    testcase( cbrk + size == usableSize );
    testcase( pc + size == usableSize );
    Buffer.BlockCopy( temp, pc, data, cbrk, size );//memcpy(data[cbrk], ref temp[pc], size);
    put2byte( data, pAddr, cbrk );
  }
  Debug.Assert( cbrk >= iCellFirst );
  /* Update the header: new content-area start, empty freelist (bytes
  ** hdr+1..hdr+2) and zero fragmented-byte count (hdr+7). */
  put2byte( data, hdr + 5, cbrk );
  data[hdr + 1] = 0;
  data[hdr + 2] = 0;
  data[hdr + 7] = 0;
  /* Zero the now-contiguous gap between the cell pointer array and the
  ** cell content area (addr == iCellFirst). */
  addr = cellOffset + 2 * nCell;
  Array.Clear( data, addr, cbrk - addr ); //memset(data[iCellFirst], 0, cbrk-iCellFirst);
  Debug.Assert( sqlite3PagerIswriteable( pPage.pDbPage ) );
  /* Sanity check: the reclaimed gap must equal the page's free count. */
  if ( cbrk - iCellFirst != pPage.nFree )
  {
    return SQLITE_CORRUPT_BKPT();
  }
  return SQLITE_OK;
}
  1264. /*
  1265. ** Allocate nByte bytes of space from within the B-Tree page passed
  1266. ** as the first argument. Write into pIdx the index into pPage.aData[]
  1267. ** of the first byte of allocated space. Return either SQLITE_OK or
  1268. ** an error code (usually SQLITE_CORRUPT).
  1269. **
  1270. ** The caller guarantees that there is sufficient space to make the
  1271. ** allocation. This routine might need to defragment in order to bring
  1272. ** all the space together, however. This routine will avoid using
  1273. ** the first two bytes past the cell pointer area since presumably this
  1274. ** allocation is being made in order to insert a new cell, so we will
  1275. ** also end up needing a new cell pointer.
  1276. */
  1277. static int allocateSpace( MemPage pPage, int nByte, ref int pIdx )
  1278. {
  1279. int hdr = pPage.hdrOffset; /* Local cache of pPage.hdrOffset */
  1280. u8[] data = pPage.aData; /* Local cache of pPage.aData */
  1281. int nFrag; /* Number of fragmented bytes on pPage */
  1282. int top; /* First byte of cell content area */
  1283. int gap; /* First byte of gap between cell pointers and cell content */
  1284. int rc; /* Integer return code */
  1285. u32 usableSize; /* Usable size of the page */
  1286. Debug.Assert( sqlite3PagerIswriteable( pPage.pDbPage ) );
  1287. Debug.Assert( pPage.pBt != null );
  1288. Debug.Assert( sqlite3_mutex_held( pPage.pBt.mutex ) );
  1289. Debug.Assert( nByte >= 0 ); /* Minimum cell size is 4 */
  1290. Debug.Assert( pPage.nFree >= nByte );
  1291. Debug.Assert( pPage.nOverflow == 0 );
  1292. usableSize = pPage.pBt.usableSize;
  1293. Debug.Assert( nByte < usableSize - 8 );
  1294. nFrag = data[hdr + 7];
  1295. Debug.Assert( pPage.cellOffset == hdr + 12 - 4 * pPage.leaf );
  1296. gap = pPage.cellOffset + 2 * pPage.nCell;
  1297. top = get2byteNotZero( data, hdr + 5 );
  1298. if ( gap > top )
  1299. return SQLITE_CORRUPT_BKPT();
  1300. testcase( gap + 2 == top );
  1301. testcase( gap + 1 == top );
  1302. testcase( gap == top );
  1303. if ( nFrag >= 60 )
  1304. {
  1305. /* Always defragment highly fragmented pages */
  1306. rc = defragmentPage( pPage );
  1307. if ( rc != 0 )
  1308. return rc;
  1309. top = get2byteNotZero( data, hdr + 5 );
  1310. }
  1311. else if ( gap + 2 <= top )
  1312. {
  1313. /* Search the freelist looking for a free slot big enough to satisfy
  1314. ** the request. The allocation is made from the first free slot in
  1315. ** the list that is large enough to accomadate it.
  1316. */
  1317. int pc, addr;
  1318. for ( addr = hdr + 1; ( pc = get2byte( data, addr ) ) > 0; addr = pc )
  1319. {
  1320. int size; /* Size of free slot */
  1321. if ( pc > usableSize - 4 || pc < addr + 4 )
  1322. {
  1323. return SQLITE_CORRUPT_BKPT();
  1324. }
  1325. size = get2byte( data, pc + 2 );
  1326. if ( size >= nByte )
  1327. {
  1328. int x = size - nByte;
  1329. testcase( x == 4 );
  1330. testcase( x == 3 );
  1331. if ( x < 4 )
  1332. {
  1333. /* Remove the slot from the free-list. Update the number of
  1334. ** fragmented bytes within the page. */
  1335. data[addr + 0] = data[pc + 0];
  1336. data[addr + 1] = data[pc + 1]; //memcpy( data[addr], ref data[pc], 2 );
  1337. data[hdr + 7] = (u8)( nFrag + x );
  1338. }
  1339. else if ( size + pc > usableSize )
  1340. {
  1341. return SQLITE_CORRUPT_BKPT();
  1342. }
  1343. else
  1344. {
  1345. /* The slot remains on the free-list. Reduce its size to account
  1346. ** for the portion used by the new allocation. */
  1347. put2byte( data, pc + 2, x );
  1348. }
  1349. pIdx = pc + x;
  1350. return SQLITE_OK;
  1351. }
  1352. }
  1353. }
  1354. /* Check to make sure there is enough space in the gap to satisfy
  1355. ** the allocation. If not, defragment.
  1356. */
  1357. testcase( gap + 2 + nByte == top );
  1358. if ( gap + 2 + nByte > top )
  1359. {
  1360. rc = defragmentPage( pPage );
  1361. if ( rc != 0 )
  1362. return rc;
  1363. top = get2byteNotZero( data, hdr + 5 );
  1364. Debug.Asser

Large files are truncated, but you can click here to view the full file