PageRenderTime 88ms CodeModel.GetById 27ms RepoModel.GetById 0ms app.codeStats 1ms

/Community.CsharpSqlite/src/btree_c.cs

https://bitbucket.org/eumario/csharp-sqlite
C# | 9563 lines | 6185 code | 606 blank | 2772 comment | 1866 complexity | d152abc1eaf5724f7b765226a992bc06 MD5 | raw file

Large files are truncated, but you can click here to view the full file

  1. using System;
  2. using System.Diagnostics;
  3. using System.Text;
  4. using i64 = System.Int64;
  5. using u8 = System.Byte;
  6. using u16 = System.UInt16;
  7. using u32 = System.UInt32;
  8. using u64 = System.UInt64;
  9. using sqlite3_int64 = System.Int64;
  10. using Pgno = System.UInt32;
  11. namespace Community.CsharpSqlite
  12. {
  13. using DbPage = Sqlite3.PgHdr;
  14. public partial class Sqlite3
  15. {
  16. /*
  17. ** 2004 April 6
  18. **
  19. ** The author disclaims copyright to this source code. In place of
  20. ** a legal notice, here is a blessing:
  21. **
  22. ** May you do good and not evil.
  23. ** May you find forgiveness for yourself and forgive others.
  24. ** May you share freely, never taking more than you give.
  25. **
  26. ** This file implements a external (disk-based) database using BTrees.
  27. ** See the header comment on "btreeInt.h" for additional information.
  28. ** Including a description of file format and an overview of operation.
  29. *************************************************************************
  30. ** Included in SQLite3 port to C#-SQLite; 2008 Noah B Hart
  31. ** C#-SQLite is an independent reimplementation of the SQLite software library
  32. **
  33. ** SQLITE_SOURCE_ID: 2011-06-23 19:49:22 4374b7e83ea0a3fbc3691f9c0c936272862f32f2
  34. **
  35. *************************************************************************
  36. */
  37. //#include "btreeInt.h"
  38. /*
  39. ** The header string that appears at the beginning of every
  40. ** SQLite database.
  41. */
static byte[] zMagicHeader = Encoding.UTF8.GetBytes( SQLITE_FILE_HEADER );  // UTF-8 bytes of the SQLITE_FILE_HEADER string (defined elsewhere in this port)
  43. /*
  44. ** Set this global variable to 1 to enable tracing using the TRACE
  45. ** macro.
  46. */
#if TRACE
static bool sqlite3BtreeTrace=false; /* True to enable tracing */
//# define TRACE(X) if(sqlite3BtreeTrace){printf X;fflush(stdout);}
// Emit a printf-style trace message when sqlite3BtreeTrace is set.
static void TRACE(string X, params object[] ap) { if (sqlite3BtreeTrace) printf(X, ap); }
#else
//# define TRACE(X)
// No-op stand-in for the C TRACE macro when tracing is compiled out.
static void TRACE( string X, params object[] ap )
{
}
#endif
  57. /*
  58. ** Extract a 2-byte big-endian integer from an array of unsigned bytes.
  59. ** But if the value is zero, make it 65536.
  60. **
  61. ** This routine is used to extract the "offset to cell content area" value
  62. ** from the header of a btree page. If the page size is 65536 and the page
  63. ** is empty, the offset should be 65536, but the 2-byte value stores zero.
  64. ** This routine makes the necessary adjustment to 65536.
  65. */
  66. //#define get2byteNotZero(X) (((((int)get2byte(X))-1)&0xffff)+1)
  67. static int get2byteNotZero( byte[] X, int offset )
  68. {
  69. return ( ( ( ( (int)get2byte( X, offset ) ) - 1 ) & 0xffff ) + 1 );
  70. }
#if !SQLITE_OMIT_SHARED_CACHE
/*
** A list of BtShared objects that are eligible for participation
** in shared cache. This variable has file scope during normal builds,
** but the test harness needs to access it so we make it global for
** test builds.
**
** Access to this variable is protected by SQLITE_MUTEX_STATIC_MASTER.
**
** NOTE(review): this declaration is un-ported C (pointer syntax).  It is
** only compiled when SQLITE_OMIT_SHARED_CACHE is undefined, which this
** C# port does not appear to support -- confirm before enabling.
*/
#if SQLITE_TEST
BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
#else
static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
#endif
#endif //* SQLITE_OMIT_SHARED_CACHE */
#if !SQLITE_OMIT_SHARED_CACHE
/*
** Enable or disable the shared pager and schema features.
**
** This routine has no effect on existing database connections.
** The shared cache setting effects only future calls to
** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2().
**
** Always returns SQLITE_OK.  Compiled only in shared-cache builds.
*/
int sqlite3_enable_shared_cache(int enable){
  sqlite3GlobalConfig.sharedCacheEnabled = enable;
  return SQLITE_OK;
}
#endif
#if SQLITE_OMIT_SHARED_CACHE
/*
** The functions querySharedCacheTableLock(), setSharedCacheTableLock(),
** and clearAllSharedCacheTableLocks()
** manipulate entries in the BtShared.pLock linked list used to store
** shared-cache table level locks. If the library is compiled with the
** shared-cache feature disabled, then there is only ever one user
** of each BtShared structure and so this locking is not necessary.
** So define the lock related functions as no-ops.
*/
//#define querySharedCacheTableLock(a,b,c) SQLITE_OK
// No-op: a lock can always be "obtained" when there is a single user.
static int querySharedCacheTableLock( Btree p, Pgno iTab, u8 eLock )
{
  return SQLITE_OK;
}
//#define setSharedCacheTableLock(a,b,c) SQLITE_OK
//#define clearAllSharedCacheTableLocks(a)
// No-op: there are no shared-cache locks to clear.
static void clearAllSharedCacheTableLocks( Btree a )
{
}
//#define downgradeAllSharedCacheTableLocks(a)
// No-op: there are no write-locks to downgrade.
static void downgradeAllSharedCacheTableLocks( Btree a )
{
}
//#define hasSharedCacheTableLock(a,b,c,d) 1
// Used inside Debug.Assert() only: the single user always "holds" the lock.
static bool hasSharedCacheTableLock( Btree a, Pgno b, int c, int d )
{
  return true;
}
//#define hasReadConflicts(a, b) 0
// Used inside Debug.Assert() only: a single user never conflicts with itself.
static bool hasReadConflicts( Btree a, Pgno b )
{
  return false;
}
#endif
  134. #if !SQLITE_OMIT_SHARED_CACHE
  135. #if SQLITE_DEBUG
  136. /*
  137. **** This function is only used as part of an assert() statement. ***
  138. **
  139. ** Check to see if pBtree holds the required locks to read or write to the
  140. ** table with root page iRoot. Return 1 if it does and 0 if not.
  141. **
  142. ** For example, when writing to a table with root-page iRoot via
  143. ** Btree connection pBtree:
  144. **
  145. ** assert( hasSharedCacheTableLock(pBtree, iRoot, 0, WRITE_LOCK) );
  146. **
  147. ** When writing to an index that resides in a sharable database, the
  148. ** caller should have first obtained a lock specifying the root page of
  149. ** the corresponding table. This makes things a bit more complicated,
  150. ** as this module treats each table as a separate structure. To determine
  151. ** the table corresponding to the index being written, this
  152. ** function has to search through the database schema.
  153. **
  154. ** Instead of a lock on the table/index rooted at page iRoot, the caller may
  155. ** hold a write-lock on the schema table (root page 1). This is also
  156. ** acceptable.
  157. */
/* NOTE(review): the body below still contains un-ported C ("Schema *",
** "->", C truthiness).  It is compiled only in shared-cache debug
** builds -- confirm the port before enabling that configuration. */
static int hasSharedCacheTableLock(
  Btree pBtree, /* Handle that must hold lock */
  Pgno iRoot, /* Root page of b-tree */
  int isIndex, /* True if iRoot is the root of an index b-tree */
  int eLockType /* Required lock type (READ_LOCK or WRITE_LOCK) */
){
  Schema pSchema = (Schema *)pBtree.pBt.pSchema;
  Pgno iTab = 0;       /* root page of the table whose lock we must find */
  BtLock pLock;        /* loop variable over the BtShared lock list */
  /* If this database is not shareable, or if the client is reading
  ** and has the read-uncommitted flag set, then no lock is required.
  ** Return true immediately.
  */
  if( (pBtree.sharable==null)
   || (eLockType==READ_LOCK && (pBtree.db.flags & SQLITE_ReadUncommitted))
  ){
    return 1;
  }
  /* If the client is reading or writing an index and the schema is
  ** not loaded, then it is too difficult to actually check to see if
  ** the correct locks are held. So do not bother - just return true.
  ** This case does not come up very often anyhow.
  */
  if( isIndex && (!pSchema || (pSchema->flags&DB_SchemaLoaded)==0) ){
    return 1;
  }
  /* Figure out the root-page that the lock should be held on. For table
  ** b-trees, this is just the root page of the b-tree being read or
  ** written. For index b-trees, it is the root page of the associated
  ** table. */
  if( isIndex ){
    HashElem p;
    /* Linear scan of the schema's index hash to find the owning table. */
    for(p=sqliteHashFirst(pSchema.idxHash); p!=null; p=sqliteHashNext(p)){
      Index pIdx = (Index *)sqliteHashData(p);
      if( pIdx.tnum==(int)iRoot ){
        iTab = pIdx.pTable.tnum;
      }
    }
  }else{
    iTab = iRoot;
  }
  /* Search for the required lock. Either a write-lock on root-page iTab, a
  ** write-lock on the schema table, or (if the client is reading) a
  ** read-lock on iTab will suffice. Return 1 if any of these are found. */
  for(pLock=pBtree.pBt.pLock; pLock; pLock=pLock.pNext){
    if( pLock.pBtree==pBtree
     && (pLock.iTable==iTab || (pLock.eLock==WRITE_LOCK && pLock.iTable==1))
     && pLock.eLock>=eLockType
    ){
      return 1;
    }
  }
  /* Failed to find the required lock. */
  return 0;
}
  213. #endif //* SQLITE_DEBUG */
  214. #if SQLITE_DEBUG
  215. /*
  216. ** This function may be used as part of assert() statements only. ****
  217. **
  218. ** Return true if it would be illegal for pBtree to write into the
  219. ** table or index rooted at iRoot because other shared connections are
  220. ** simultaneously reading that same table or index.
  221. **
  222. ** It is illegal for pBtree to write if some other Btree object that
  223. ** shares the same BtShared object is currently reading or writing
  224. ** the iRoot table. Except, if the other Btree object has the
  225. ** read-uncommitted flag set, then it is OK for the other object to
  226. ** have a read cursor.
  227. **
  228. ** For example, before writing to any part of the table or index
  229. ** rooted at page iRoot, one should call:
  230. **
  231. ** assert( !hasReadConflicts(pBtree, iRoot) );
  232. */
/* Compiled only in shared-cache debug builds; used inside assert(). */
static int hasReadConflicts(Btree pBtree, Pgno iRoot){
  BtCursor p;
  /* Walk every cursor open on the shared BtShared structure. */
  for(p=pBtree.pBt.pCursor; p!=null; p=p.pNext){
    if( p.pgnoRoot==iRoot                               /* on the table in question */
     && p.pBtree!=pBtree                                /* owned by another connection */
     && 0==(p.pBtree.db.flags & SQLITE_ReadUncommitted) /* which is not read-uncommitted */
    ){
      return 1;  /* conflict found */
    }
  }
  return 0;  /* no conflicting readers */
}
  245. #endif //* #if SQLITE_DEBUG */
  246. /*
  247. ** Query to see if Btree handle p may obtain a lock of type eLock
  248. ** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return
  249. ** SQLITE_OK if the lock may be obtained (by calling
  250. ** setSharedCacheTableLock()), or SQLITE_LOCKED if not.
  251. */
/* NOTE(review): shared-cache builds only.  The body retains C truthiness
** tests ("pIter", "!p.sharable", "pBt.isExclusive" used as conditions)
** from the C original -- confirm the port before enabling. */
static int querySharedCacheTableLock(Btree p, Pgno iTab, u8 eLock){
  BtShared pBt = p.pBt;   /* the shared btree underlying handle p */
  BtLock pIter;           /* loop variable over the BtShared lock list */
  Debug.Assert( sqlite3BtreeHoldsMutex(p) );
  Debug.Assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
  Debug.Assert( p.db!=null );
  /* A read-uncommitted connection only ever requests read-locks on the
  ** schema table (iTab==1); everything else must be a write-lock. */
  Debug.Assert( !(p.db.flags&SQLITE_ReadUncommitted)||eLock==WRITE_LOCK||iTab==1 );
  /* If requesting a write-lock, then the Btree must have an open write
  ** transaction on this file. And, obviously, for this to be so there
  ** must be an open write transaction on the file itself.
  */
  Debug.Assert( eLock==READ_LOCK || (p==pBt.pWriter && p.inTrans==TRANS_WRITE) );
  Debug.Assert( eLock==READ_LOCK || pBt.inTransaction==TRANS_WRITE );
  /* This routine is a no-op if the shared-cache is not enabled */
  if( !p.sharable ){
    return SQLITE_OK;
  }
  /* If some other connection is holding an exclusive lock, the
  ** requested lock may not be obtained.
  */
  if( pBt.pWriter!=p && pBt.isExclusive ){
    sqlite3ConnectionBlocked(p.db, pBt.pWriter.db);
    return SQLITE_LOCKED_SHAREDCACHE;
  }
  for(pIter=pBt.pLock; pIter; pIter=pIter.pNext){
    /* The condition (pIter.eLock!=eLock) in the following if(...)
    ** statement is a simplification of:
    **
    ** (eLock==WRITE_LOCK || pIter.eLock==WRITE_LOCK)
    **
    ** since we know that if eLock==WRITE_LOCK, then no other connection
    ** may hold a WRITE_LOCK on any table in this file (since there can
    ** only be a single writer).
    */
    Debug.Assert( pIter.eLock==READ_LOCK || pIter.eLock==WRITE_LOCK );
    Debug.Assert( eLock==READ_LOCK || pIter.pBtree==p || pIter.eLock==READ_LOCK);
    if( pIter.pBtree!=p && pIter.iTable==iTab && pIter.eLock!=eLock ){
      /* Another connection holds a conflicting lock on this table. */
      sqlite3ConnectionBlocked(p.db, pIter.pBtree.db);
      if( eLock==WRITE_LOCK ){
        Debug.Assert( p==pBt.pWriter );
        pBt.isPending = 1;  /* writer is now waiting for readers to clear */
      }
      return SQLITE_LOCKED_SHAREDCACHE;
    }
  }
  return SQLITE_OK;
}
  299. #endif //* !SQLITE_OMIT_SHARED_CACHE */
  300. #if !SQLITE_OMIT_SHARED_CACHE
  301. /*
  302. ** Add a lock on the table with root-page iTable to the shared-btree used
  303. ** by Btree handle p. Parameter eLock must be either READ_LOCK or
  304. ** WRITE_LOCK.
  305. **
  306. ** This function assumes the following:
  307. **
  308. ** (a) The specified Btree object p is connected to a sharable
  309. ** database (one with the BtShared.sharable flag set), and
  310. **
  311. ** (b) No other Btree objects hold a lock that conflicts
  312. ** with the requested lock (i.e. querySharedCacheTableLock() has
  313. ** already been called and returned SQLITE_OK).
  314. **
  315. ** SQLITE_OK is returned if the lock is added successfully. SQLITE_NOMEM
  316. ** is returned if a malloc attempt fails.
  317. */
/* NOTE(review): shared-cache builds only.  Retains un-ported C idioms
** ("BtLock pLock = 0", pointer cast, sizeof) -- confirm before enabling. */
static int setSharedCacheTableLock(Btree p, Pgno iTable, u8 eLock){
  BtShared pBt = p.pBt;
  BtLock pLock = 0;   /* existing lock for (p, iTable) found on the list, if any */
  BtLock pIter;
  Debug.Assert( sqlite3BtreeHoldsMutex(p) );
  Debug.Assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
  Debug.Assert( p.db!=null );
  /* A connection with the read-uncommitted flag set will never try to
  ** obtain a read-lock using this function. The only read-lock obtained
  ** by a connection in read-uncommitted mode is on the sqlite_master
  ** table, and that lock is obtained in BtreeBeginTrans(). */
  Debug.Assert( 0==(p.db.flags&SQLITE_ReadUncommitted) || eLock==WRITE_LOCK );
  /* This function should only be called on a sharable b-tree after it
  ** has been determined that no other b-tree holds a conflicting lock. */
  Debug.Assert( p.sharable );
  Debug.Assert( SQLITE_OK==querySharedCacheTableLock(p, iTable, eLock) );
  /* First search the list for an existing lock on this table. */
  for(pIter=pBt.pLock; pIter; pIter=pIter.pNext){
    if( pIter.iTable==iTable && pIter.pBtree==p ){
      pLock = pIter;
      break;
    }
  }
  /* If the above search did not find a BtLock struct associating Btree p
  ** with table iTable, allocate one and link it into the list.
  */
  if( !pLock ){
    pLock = (BtLock *)sqlite3MallocZero(sizeof(BtLock));
    if( !pLock ){
      return SQLITE_NOMEM;
    }
    pLock.iTable = iTable;
    pLock.pBtree = p;
    pLock.pNext = pBt.pLock;  /* push onto the head of the lock list */
    pBt.pLock = pLock;
  }
  /* Set the BtLock.eLock variable to the maximum of the current lock
  ** and the requested lock. This means if a write-lock was already held
  ** and a read-lock requested, we don't incorrectly downgrade the lock.
  */
  Debug.Assert( WRITE_LOCK>READ_LOCK );
  if( eLock>pLock.eLock ){
    pLock.eLock = eLock;
  }
  return SQLITE_OK;
}
  364. #endif //* !SQLITE_OMIT_SHARED_CACHE */
  365. #if !SQLITE_OMIT_SHARED_CACHE
  366. /*
  367. ** Release all the table locks (locks obtained via calls to
  368. ** the setSharedCacheTableLock() procedure) held by Btree object p.
  369. **
  370. ** This function assumes that Btree p has an open read or write
  371. ** transaction. If it does not, then the BtShared.isPending variable
  372. ** may be incorrectly cleared.
  373. */
/* NOTE(review): un-ported C -- the list surgery below uses BtLock**
** pointer-to-pointer idioms with no direct C# equivalent.  Compiled only
** in shared-cache builds; confirm the port before enabling. */
static void clearAllSharedCacheTableLocks(Btree p){
  BtShared pBt = p.pBt;
  BtLock **ppIter = &pBt.pLock;  /* address of the link to the current lock */
  Debug.Assert( sqlite3BtreeHoldsMutex(p) );
  Debug.Assert( p.sharable || 0==*ppIter );
  Debug.Assert( p.inTrans>0 );
  while( ppIter ){
    BtLock pLock = ppIter;
    Debug.Assert( pBt.isExclusive==null || pBt.pWriter==pLock.pBtree );
    Debug.Assert( pLock.pBtree.inTrans>=pLock.eLock );
    if( pLock.pBtree==p ){
      /* Unlink this connection's lock from the list... */
      ppIter = pLock.pNext;
      Debug.Assert( pLock.iTable!=1 || pLock==&p.lock );
      if( pLock.iTable!=1 ){
        /* ...and free it, unless it is the embedded schema-table lock. */
        pLock=null;//sqlite3_free(ref pLock);
      }
    }else{
      ppIter = &pLock.pNext;
    }
  }
  Debug.Assert( pBt.isPending==null || pBt.pWriter );
  if( pBt.pWriter==p ){
    /* p was the writer: clear all writer state on the shared btree. */
    pBt.pWriter = 0;
    pBt.isExclusive = 0;
    pBt.isPending = 0;
  }else if( pBt.nTransaction==2 ){
    /* This function is called when Btree p is concluding its
    ** transaction. If there currently exists a writer, and p is not
    ** that writer, then the number of locks held by connections other
    ** than the writer must be about to drop to zero. In this case
    ** set the isPending flag to 0.
    **
    ** If there is not currently a writer, then BtShared.isPending must
    ** be zero already. So this next line is harmless in that case.
    */
    pBt.isPending = 0;
  }
}
  412. /*
  413. ** This function changes all write-locks held by Btree p into read-locks.
  414. */
/* NOTE(review): shared-cache builds only; "pLock" used bare as a loop
** condition is C truthiness left from the original -- confirm port. */
static void downgradeAllSharedCacheTableLocks(Btree p){
  BtShared pBt = p.pBt;
  if( pBt.pWriter==p ){
    BtLock pLock;
    pBt.pWriter = 0;     /* relinquish writer status */
    pBt.isExclusive = 0;
    pBt.isPending = 0;
    for(pLock=pBt.pLock; pLock; pLock=pLock.pNext){
      Debug.Assert( pLock.eLock==READ_LOCK || pLock.pBtree==p );
      pLock.eLock = READ_LOCK;  /* demote every held lock to a read-lock */
    }
  }
}
  428. #endif //* SQLITE_OMIT_SHARED_CACHE */
  429. //static void releasePage(MemPage pPage); /* Forward reference */
  430. /*
  431. ***** This routine is used inside of assert() only ****
  432. **
  433. ** Verify that the cursor holds the mutex on its BtShared
  434. */
#if SQLITE_DEBUG
// True if the mutex of the cursor's shared BtShared is currently held.
// Used only inside Debug.Assert() statements.
static bool cursorHoldsMutex( BtCursor p )
{
  return sqlite3_mutex_held( p.pBt.mutex );
}
#else
// Release builds: assume the mutex discipline is followed.
static bool cursorHoldsMutex(BtCursor p) { return true; }
#endif
  443. #if !SQLITE_OMIT_INCRBLOB
  444. /*
  445. ** Invalidate the overflow page-list cache for cursor pCur, if any.
  446. */
static void invalidateOverflowCache(BtCursor pCur){
  Debug.Assert( cursorHoldsMutex(pCur) );
  //sqlite3_free(ref pCur.aOverflow);
  pCur.aOverflow = null;  /* drop the cached overflow page list; GC reclaims it */
}
  452. /*
  453. ** Invalidate the overflow page-list cache for all cursors opened
  454. ** on the shared btree structure pBt.
  455. */
  456. static void invalidateAllOverflowCache(BtShared pBt){
  457. BtCursor p;
  458. Debug.Assert( sqlite3_mutex_held(pBt.mutex) );
  459. for(p=pBt.pCursor; p!=null; p=p.pNext){
  460. invalidateOverflowCache(p);
  461. }
  462. }
  463. /*
  464. ** This function is called before modifying the contents of a table
  465. ** to invalidate any incrblob cursors that are open on the
  466. ** row or one of the rows being modified.
  467. **
  468. ** If argument isClearTable is true, then the entire contents of the
  469. ** table is about to be deleted. In this case invalidate all incrblob
  470. ** cursors open on any row within the table with root-page pgnoRoot.
  471. **
  472. ** Otherwise, if argument isClearTable is false, then the row with
  473. ** rowid iRow is being replaced or deleted. In this case invalidate
  474. ** only those incrblob cursors open on that specific row.
  475. */
static void invalidateIncrblobCursors(
  Btree pBtree, /* The database file to check */
  i64 iRow, /* The rowid that might be changing */
  int isClearTable /* True if all rows are being deleted */
){
  BtCursor p;
  BtShared pBt = pBtree.pBt;
  Debug.Assert( sqlite3BtreeHoldsMutex(pBtree) );
  /* Mark CURSOR_INVALID every open incrblob handle positioned on an
  ** affected row (all rows when isClearTable is set, otherwise only the
  ** cursor whose key equals iRow). */
  for(p=pBt.pCursor; p!=null; p=p.pNext){
    if( p.isIncrblobHandle && (isClearTable || p.info.nKey==iRow) ){
      p.eState = CURSOR_INVALID;
    }
  }
}
  490. #else
/* Stub functions when INCRBLOB is omitted */
//#define invalidateOverflowCache(x)
// No-op: without incremental-blob support there is no overflow cache.
static void invalidateOverflowCache( BtCursor pCur )
{
}
//#define invalidateAllOverflowCache(x)
// No-op: nothing to invalidate when INCRBLOB is omitted.
static void invalidateAllOverflowCache( BtShared pBt )
{
}
//#define invalidateIncrblobCursors(x,y,z)
// No-op: there are no incrblob cursors to invalidate.
static void invalidateIncrblobCursors( Btree x, i64 y, int z )
{
}
  504. #endif //* SQLITE_OMIT_INCRBLOB */
  505. /*
  506. ** Set bit pgno of the BtShared.pHasContent bitvec. This is called
  507. ** when a page that previously contained data becomes a free-list leaf
  508. ** page.
  509. **
  510. ** The BtShared.pHasContent bitvec exists to work around an obscure
  511. ** bug caused by the interaction of two useful IO optimizations surrounding
  512. ** free-list leaf pages:
  513. **
  514. ** 1) When all data is deleted from a page and the page becomes
  515. ** a free-list leaf page, the page is not written to the database
  516. ** (as free-list leaf pages contain no meaningful data). Sometimes
  517. ** such a page is not even journalled (as it will not be modified,
  518. ** why bother journalling it?).
  519. **
  520. ** 2) When a free-list leaf page is reused, its content is not read
  521. ** from the database or written to the journal file (why should it
  522. ** be, if it is not at all meaningful?).
  523. **
  524. ** By themselves, these optimizations work fine and provide a handy
  525. ** performance boost to bulk delete or insert operations. However, if
  526. ** a page is moved to the free-list and then reused within the same
  527. ** transaction, a problem comes up. If the page is not journalled when
  528. ** it is moved to the free-list and it is also not journalled when it
  529. ** is extracted from the free-list and reused, then the original data
  530. ** may be lost. In the event of a rollback, it may not be possible
  531. ** to restore the database to its original configuration.
  532. **
  533. ** The solution is the BtShared.pHasContent bitvec. Whenever a page is
  534. ** moved to become a free-list leaf page, the corresponding bit is
  535. ** set in the bitvec. Whenever a leaf page is extracted from the free-list,
  536. ** optimization 2 above is omitted if the corresponding bit is already
  537. ** set in BtShared.pHasContent. The contents of the bitvec are cleared
  538. ** at the end of every transaction.
  539. */
  540. static int btreeSetHasContent( BtShared pBt, Pgno pgno )
  541. {
  542. int rc = SQLITE_OK;
  543. if ( null == pBt.pHasContent )
  544. {
  545. Debug.Assert( pgno <= pBt.nPage );
  546. pBt.pHasContent = sqlite3BitvecCreate( pBt.nPage );
  547. //if ( null == pBt.pHasContent )
  548. //{
  549. // rc = SQLITE_NOMEM;
  550. //}
  551. }
  552. if ( rc == SQLITE_OK && pgno <= sqlite3BitvecSize( pBt.pHasContent ) )
  553. {
  554. rc = sqlite3BitvecSet( pBt.pHasContent, pgno );
  555. }
  556. return rc;
  557. }
  558. /*
  559. ** Query the BtShared.pHasContent vector.
  560. **
  561. ** This function is called when a free-list leaf page is removed from the
  562. ** free-list for reuse. It returns false if it is safe to retrieve the
  563. ** page from the pager layer with the 'no-content' flag set. True otherwise.
  564. */
  565. static bool btreeGetHasContent( BtShared pBt, Pgno pgno )
  566. {
  567. Bitvec p = pBt.pHasContent;
  568. return ( p != null && ( pgno > sqlite3BitvecSize( p ) || sqlite3BitvecTest( p, pgno ) != 0 ) );
  569. }
  570. /*
  571. ** Clear (destroy) the BtShared.pHasContent bitvec. This should be
  572. ** invoked at the conclusion of each write-transaction.
  573. */
static void btreeClearHasContent( BtShared pBt )
{
  sqlite3BitvecDestroy( ref pBt.pHasContent );
  pBt.pHasContent = null;  /* explicit clear in case Destroy did not null the ref -- TODO confirm */
}
  579. /*
  580. ** Save the current cursor position in the variables BtCursor.nKey
  581. ** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK.
  582. **
  583. ** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID)
  584. ** prior to calling this routine.
  585. */
/*
** Save the cursor's logical position into BtCursor.nKey (and, for
** non-intkey tables, BtCursor.pKey), release every page reference the
** cursor holds, and set its state to CURSOR_REQUIRESEEK.
** The caller must guarantee eState==CURSOR_VALID on entry.
** Returns SQLITE_OK or an error code from sqlite3BtreeKey().
*/
static int saveCursorPosition( BtCursor pCur )
{
  int rc;
  Debug.Assert( CURSOR_VALID == pCur.eState );
  Debug.Assert( null == pCur.pKey );   /* no position may already be saved */
  Debug.Assert( cursorHoldsMutex( pCur ) );
  rc = sqlite3BtreeKeySize( pCur, ref pCur.nKey );
  Debug.Assert( rc == SQLITE_OK ); /* KeySize() cannot fail */
  /* If this is an intKey table, then the above call to BtreeKeySize()
  ** stores the integer key in pCur.nKey. In this case this value is
  ** all that is required. Otherwise, if pCur is not open on an intKey
  ** table, then malloc space for and store the pCur.nKey bytes of key
  ** data.
  */
  if ( 0 == pCur.apPage[0].intKey )
  {
    byte[] pKey = sqlite3Malloc( (int)pCur.nKey );
    /* NOTE(review): the C original's OOM handling is commented out here,
    ** presumably because allocation in C# throws instead of returning
    ** null -- confirm. */
    //if( pKey !=null){
    rc = sqlite3BtreeKey( pCur, 0, (u32)pCur.nKey, pKey );
    if ( rc == SQLITE_OK )
    {
      pCur.pKey = pKey;
    }
    //else{
    // sqlite3_free(ref pKey);
    //}
    //}else{
    // rc = SQLITE_NOMEM;
    //}
  }
  Debug.Assert( 0 == pCur.apPage[0].intKey || null == pCur.pKey );
  if ( rc == SQLITE_OK )
  {
    /* Drop every page reference held by the cursor. */
    int i;
    for ( i = 0; i <= pCur.iPage; i++ )
    {
      releasePage( pCur.apPage[i] );
      pCur.apPage[i] = null;
    }
    pCur.iPage = -1;
    pCur.eState = CURSOR_REQUIRESEEK;
  }
  invalidateOverflowCache( pCur );
  return rc;
}
  631. /*
  632. ** Save the positions of all cursors (except pExcept) that are open on
  633. ** the table with root-page iRoot. Usually, this is called just before cursor
  634. ** pExcept is used to modify the table (BtreeDelete() or BtreeInsert()).
  635. */
  636. static int saveAllCursors( BtShared pBt, Pgno iRoot, BtCursor pExcept )
  637. {
  638. BtCursor p;
  639. Debug.Assert( sqlite3_mutex_held( pBt.mutex ) );
  640. Debug.Assert( pExcept == null || pExcept.pBt == pBt );
  641. for ( p = pBt.pCursor; p != null; p = p.pNext )
  642. {
  643. if ( p != pExcept && ( 0 == iRoot || p.pgnoRoot == iRoot ) &&
  644. p.eState == CURSOR_VALID )
  645. {
  646. int rc = saveCursorPosition( p );
  647. if ( SQLITE_OK != rc )
  648. {
  649. return rc;
  650. }
  651. }
  652. }
  653. return SQLITE_OK;
  654. }
  655. /*
  656. ** Clear the current cursor position.
  657. */
static void sqlite3BtreeClearCursor( BtCursor pCur )
{
  Debug.Assert( cursorHoldsMutex( pCur ) );
  sqlite3_free( ref pCur.pKey );  /* discard any key saved by saveCursorPosition() */
  pCur.eState = CURSOR_INVALID;
}
  664. /*
  665. ** In this version of BtreeMoveto, pKey is a packed index record
  666. ** such as is generated by the OP_MakeRecord opcode. Unpack the
  667. ** record and then call BtreeMovetoUnpacked() to do the work.
  668. */
  669. static int btreeMoveto(
  670. BtCursor pCur, /* Cursor open on the btree to be searched */
  671. byte[] pKey, /* Packed key if the btree is an index */
  672. i64 nKey, /* Integer key for tables. Size of pKey for indices */
  673. int bias, /* Bias search to the high end */
  674. ref int pRes /* Write search results here */
  675. )
  676. {
  677. int rc; /* Status code */
  678. UnpackedRecord pIdxKey; /* Unpacked index key */
  679. UnpackedRecord aSpace = new UnpackedRecord();//char aSpace[150]; /* Temp space for pIdxKey - to avoid a malloc */
  680. if ( pKey != null )
  681. {
  682. Debug.Assert( nKey == (i64)(int)nKey );
  683. pIdxKey = sqlite3VdbeRecordUnpack( pCur.pKeyInfo, (int)nKey, pKey,
  684. aSpace, 16 );//sizeof( aSpace ) );
  685. //if ( pIdxKey == null )
  686. // return SQLITE_NOMEM;
  687. }
  688. else
  689. {
  690. pIdxKey = null;
  691. }
  692. rc = sqlite3BtreeMovetoUnpacked( pCur, pIdxKey, nKey, bias != 0 ? 1 : 0, ref pRes );
  693. if ( pKey != null )
  694. {
  695. sqlite3VdbeDeleteUnpackedRecord( pIdxKey );
  696. }
  697. return rc;
  698. }
  699. /*
  700. ** Restore the cursor to the position it was in (or as close to as possible)
  701. ** when saveCursorPosition() was called. Note that this call deletes the
  702. ** saved position info stored by saveCursorPosition(), so there can be
  703. ** at most one effective restoreCursorPosition() call after each
  704. ** saveCursorPosition().
  705. */
  706. static int btreeRestoreCursorPosition( BtCursor pCur )
  707. {
  708. int rc;
  709. Debug.Assert( cursorHoldsMutex( pCur ) );
  710. Debug.Assert( pCur.eState >= CURSOR_REQUIRESEEK );
  711. if ( pCur.eState == CURSOR_FAULT )
  712. {
  713. return pCur.skipNext;
  714. }
  715. pCur.eState = CURSOR_INVALID;
  716. rc = btreeMoveto( pCur, pCur.pKey, pCur.nKey, 0, ref pCur.skipNext );
  717. if ( rc == SQLITE_OK )
  718. {
  719. //sqlite3_free(ref pCur.pKey);
  720. pCur.pKey = null;
  721. Debug.Assert( pCur.eState == CURSOR_VALID || pCur.eState == CURSOR_INVALID );
  722. }
  723. return rc;
  724. }
  725. //#define restoreCursorPosition(p) \
  726. // (p.eState>=CURSOR_REQUIRESEEK ? \
  727. // btreeRestoreCursorPosition(p) : \
  728. // SQLITE_OK)
  729. static int restoreCursorPosition( BtCursor pCur )
  730. {
  731. if ( pCur.eState >= CURSOR_REQUIRESEEK )
  732. return btreeRestoreCursorPosition( pCur );
  733. else
  734. return SQLITE_OK;
  735. }
  736. /*
  737. ** Determine whether or not a cursor has moved from the position it
  738. ** was last placed at. Cursors can move when the row they are pointing
  739. ** at is deleted out from under them.
  740. **
  741. ** This routine returns an error code if something goes wrong. The
  742. ** integer pHasMoved is set to one if the cursor has moved and 0 if not.
  743. */
  744. static int sqlite3BtreeCursorHasMoved( BtCursor pCur, ref int pHasMoved )
  745. {
  746. int rc;
  747. rc = restoreCursorPosition( pCur );
  748. if ( rc != 0 )
  749. {
  750. pHasMoved = 1;
  751. return rc;
  752. }
  753. if ( pCur.eState != CURSOR_VALID || pCur.skipNext != 0 )
  754. {
  755. pHasMoved = 1;
  756. }
  757. else
  758. {
  759. pHasMoved = 0;
  760. }
  761. return SQLITE_OK;
  762. }
  763. #if !SQLITE_OMIT_AUTOVACUUM
  764. /*
  765. ** Given a page number of a regular database page, return the page
  766. ** number for the pointer-map page that contains the entry for the
  767. ** input page number.
  768. **
  769. ** Return 0 (not a valid page) for pgno==1 since there is
  770. ** no pointer map associated with page 1. The integrity_check logic
  771. ** requires that ptrmapPageno(*,1)!=1.
  772. */
  773. static Pgno ptrmapPageno( BtShared pBt, Pgno pgno )
  774. {
  775. int nPagesPerMapPage;
  776. Pgno iPtrMap, ret;
  777. Debug.Assert( sqlite3_mutex_held( pBt.mutex ) );
  778. if ( pgno < 2 )
  779. return 0;
  780. nPagesPerMapPage = (int)( pBt.usableSize / 5 + 1 );
  781. iPtrMap = (Pgno)( ( pgno - 2 ) / nPagesPerMapPage );
  782. ret = (Pgno)( iPtrMap * nPagesPerMapPage ) + 2;
  783. if ( ret == PENDING_BYTE_PAGE( pBt ) )
  784. {
  785. ret++;
  786. }
  787. return ret;
  788. }
  789. /*
  790. ** Write an entry into the pointer map.
  791. **
  792. ** This routine updates the pointer map entry for page number 'key'
  793. ** so that it maps to type 'eType' and parent page number 'pgno'.
  794. **
  795. ** If pRC is initially non-zero (non-SQLITE_OK) then this routine is
  796. ** a no-op. If an error occurs, the appropriate error code is written
  797. ** into pRC.
  798. */
static void ptrmapPut( BtShared pBt, Pgno key, u8 eType, Pgno parent, ref int pRC )
{
  PgHdr pDbPage = new PgHdr(); /* The pointer map page */
  u8[] pPtrmap; /* The pointer map data */
  Pgno iPtrmap; /* The pointer map page number */
  int offset; /* Offset in pointer map page */
  int rc; /* Return code from subfunctions */
  if ( pRC != 0 )
    return;  /* no-op when an error is already recorded in pRC */
  Debug.Assert( sqlite3_mutex_held( pBt.mutex ) );
  /* The master-journal page number must never be used as a pointer map page */
  Debug.Assert( false == PTRMAP_ISPAGE( pBt, PENDING_BYTE_PAGE( pBt ) ) );
  Debug.Assert( pBt.autoVacuum );
  if ( key == 0 )
  {
    /* Page 0 does not exist: the database must be corrupt. */
    pRC = SQLITE_CORRUPT_BKPT();
    return;
  }
  iPtrmap = PTRMAP_PAGENO( pBt, key );
  rc = sqlite3PagerGet( pBt.pPager, iPtrmap, ref pDbPage );
  if ( rc != SQLITE_OK )
  {
    pRC = rc;
    return;
  }
  offset = (int)PTRMAP_PTROFFSET( iPtrmap, key );
  if ( offset < 0 )
  {
    pRC = SQLITE_CORRUPT_BKPT();
    goto ptrmap_exit;  /* still must release the page reference below */
  }
  Debug.Assert( offset <= (int)pBt.usableSize - 5 );
  pPtrmap = sqlite3PagerGetData( pDbPage );
  /* Only write the page when the 5-byte entry (type byte + 4-byte
  ** parent page number) would actually change. */
  if ( eType != pPtrmap[offset] || sqlite3Get4byte( pPtrmap, offset + 1 ) != parent )
  {
    TRACE( "PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent );
    pRC = rc = sqlite3PagerWrite( pDbPage );
    if ( rc == SQLITE_OK )
    {
      pPtrmap[offset] = eType;
      sqlite3Put4byte( pPtrmap, offset + 1, parent );
    }
  }
ptrmap_exit:
  sqlite3PagerUnref( pDbPage );
}
  845. /*
  846. ** Read an entry from the pointer map.
  847. **
  848. ** This routine retrieves the pointer map entry for page 'key', writing
  849. ** the type and parent page number to pEType and pPgno respectively.
  850. ** An error code is returned if something goes wrong, otherwise SQLITE_OK.
  851. */
  852. static int ptrmapGet( BtShared pBt, Pgno key, ref u8 pEType, ref Pgno pPgno )
  853. {
  854. PgHdr pDbPage = new PgHdr();/* The pointer map page */
  855. int iPtrmap; /* Pointer map page index */
  856. u8[] pPtrmap; /* Pointer map page data */
  857. int offset; /* Offset of entry in pointer map */
  858. int rc;
  859. Debug.Assert( sqlite3_mutex_held( pBt.mutex ) );
  860. iPtrmap = (int)PTRMAP_PAGENO( pBt, key );
  861. rc = sqlite3PagerGet( pBt.pPager, (u32)iPtrmap, ref pDbPage );
  862. if ( rc != 0 )
  863. {
  864. return rc;
  865. }
  866. pPtrmap = sqlite3PagerGetData( pDbPage );
  867. offset = (int)PTRMAP_PTROFFSET( (u32)iPtrmap, key );
  868. if ( offset < 0 )
  869. {
  870. sqlite3PagerUnref( pDbPage );
  871. return SQLITE_CORRUPT_BKPT();
  872. }
  873. Debug.Assert( offset <= (int)pBt.usableSize - 5 );
  874. // Under C# pEType will always exist. No need to test; //
  875. //Debug.Assert( pEType != 0 );
  876. pEType = pPtrmap[offset];
  877. // Under C# pPgno will always exist. No need to test; //
  878. //if ( pPgno != 0 )
  879. pPgno = sqlite3Get4byte( pPtrmap, offset + 1 );
  880. sqlite3PagerUnref( pDbPage );
  881. if ( pEType < 1 || pEType > 5 )
  882. return SQLITE_CORRUPT_BKPT();
  883. return SQLITE_OK;
  884. }
  885. #else //* if defined SQLITE_OMIT_AUTOVACUUM */
  886. //#define ptrmapPut(w,x,y,z,rc)
  887. //#define ptrmapGet(w,x,y,z) SQLITE_OK
  888. //#define ptrmapPutOvflPtr(x, y, rc)
  889. #endif
  890. /*
  891. ** Given a btree page and a cell index (0 means the first cell on
  892. ** the page, 1 means the second cell, and so forth) return a pointer
  893. ** to the cell content.
  894. **
  895. ** This routine works only for pages that do not contain overflow cells.
  896. */
//#define findCell(P,I) \
// ((P).aData + ((P).maskPage & get2byte((P).aData[(P).cellOffset+2*(I)])))
static int findCell( MemPage pPage, int iCell )
{
  /* Return the offset within pPage.aData of the iCell-th cell's content,
  ** read from the cell-pointer array starting at pPage.cellOffset.
  ** (The C# port returns a byte offset where the C macro returns a
  ** pointer; it also omits the maskPage AND shown in the macro above.)
  ** Works only for pages with no overflow cells. */
  return get2byte( pPage.aData, pPage.cellOffset + 2 * ( iCell ) );
}
  903. //#define findCellv2(D,M,O,I) (D+(M&get2byte(D+(O+2*(I)))))
//#define findCellv2(D,M,O,I) (D+(M&get2byte(D+(O+2*(I)))))
static u8[] findCellv2( u8[] pPage, u16 iCell, u16 O, int I )
{
  /* Unimplemented stub of the native findCellv2() macro.  Breaks into
  ** the debugger if ever reached, then returns its input unchanged. */
  Debugger.Break();
  return pPage;
}
  909. /*
  910. ** This a more complex version of findCell() that works for
  911. ** pages that do contain overflow cells.
  912. */
  913. static int findOverflowCell( MemPage pPage, int iCell )
  914. {
  915. int i;
  916. Debug.Assert( sqlite3_mutex_held( pPage.pBt.mutex ) );
  917. for ( i = pPage.nOverflow - 1; i >= 0; i-- )
  918. {
  919. int k;
  920. _OvflCell pOvfl;
  921. pOvfl = pPage.aOvfl[i];
  922. k = pOvfl.idx;
  923. if ( k <= iCell )
  924. {
  925. if ( k == iCell )
  926. {
  927. //return pOvfl.pCell;
  928. return -i - 1; // Negative Offset means overflow cells
  929. }
  930. iCell--;
  931. }
  932. }
  933. return findCell( pPage, iCell );
  934. }
  935. /*
  936. ** Parse a cell content block and fill in the CellInfo structure. There
  937. ** are two versions of this function. btreeParseCell() takes a
  938. ** cell index as the second argument and btreeParseCellPtr()
  939. ** takes a pointer to the body of the cell as its second argument.
  940. **
  941. ** Within this file, the parseCell() macro can be called instead of
  942. ** btreeParseCellPtr(). Using some compilers, this will be faster.
  943. */
//OVERLOADS
static void btreeParseCellPtr(
MemPage pPage, /* Page containing the cell */
int iCell, /* Offset of the cell within the page image */
ref CellInfo pInfo /* Fill in this structure */
)
{
  /* Convenience overload: the cell lives in the page image itself, so
  ** delegate with pPage.aData as the buffer. */
  btreeParseCellPtr( pPage, pPage.aData, iCell, ref pInfo );
}
static void btreeParseCellPtr(
MemPage pPage, /* Page containing the cell */
byte[] pCell, /* The actual data */
ref CellInfo pInfo /* Fill in this structure */
)
{
  /* Convenience overload: the cell buffer starts at offset 0. */
  btreeParseCellPtr( pPage, pCell, 0, ref pInfo );
}
static void btreeParseCellPtr(
MemPage pPage, /* Page containing the cell */
u8[] pCell, /* Buffer holding the cell text */
int iCell, /* Offset of the cell within pCell[] */
ref CellInfo pInfo /* Fill in this structure */
)
{
  /* Parse the cell header at pCell[iCell] and populate pInfo with the
  ** key, payload sizes, local/overflow split, and total cell size. */
  u16 n; /* Number bytes in cell content header */
  u32 nPayload = 0; /* Number of bytes of cell payload */
  Debug.Assert( sqlite3_mutex_held( pPage.pBt.mutex ) );
  if ( pInfo.pCell != pCell )
    pInfo.pCell = pCell;
  pInfo.iCell = iCell;
  Debug.Assert( pPage.leaf == 0 || pPage.leaf == 1 );
  /* Interior pages start with a 4-byte child pointer; leaves do not. */
  n = pPage.childPtrSize;
  Debug.Assert( n == 4 - 4 * pPage.leaf );
  if ( pPage.intKey != 0 )
  {
    /* Table b-tree: leaf cells (hasData!=0) carry a data-length varint
    ** followed by the 64-bit rowid key varint. */
    if ( pPage.hasData != 0 )
    {
      n += (u16)getVarint32( pCell, iCell + n, out nPayload );
    }
    else
    {
      nPayload = 0;
    }
    n += (u16)getVarint( pCell, iCell + n, out pInfo.nKey );
    pInfo.nData = nPayload;
  }
  else
  {
    /* Index b-tree: the payload is the key itself; nKey doubles as the
    ** payload length. */
    pInfo.nData = 0;
    n += (u16)getVarint32( pCell, iCell + n, out nPayload );
    pInfo.nKey = nPayload;
  }
  pInfo.nPayload = nPayload;
  pInfo.nHeader = n;
  testcase( nPayload == pPage.maxLocal );
  testcase( nPayload == pPage.maxLocal + 1 );
  if ( likely( nPayload <= pPage.maxLocal ) )
  {
    /* This is the (easy) common case where the entire payload fits
    ** on the local page. No overflow is required.
    */
    if ( ( pInfo.nSize = (u16)( n + nPayload ) ) < 4 )
      pInfo.nSize = 4; /* a cell is never smaller than 4 bytes */
    pInfo.nLocal = (u16)nPayload;
    pInfo.iOverflow = 0;
  }
  else
  {
    /* If the payload will not fit completely on the local page, we have
    ** to decide how much to store locally and how much to spill onto
    ** overflow pages. The strategy is to minimize the amount of unused
    ** space on overflow pages while keeping the amount of local storage
    ** in between minLocal and maxLocal.
    **
    ** Warning: changing the way overflow payload is distributed in any
    ** way will result in an incompatible file format.
    */
    int minLocal; /* Minimum amount of payload held locally */
    int maxLocal; /* Maximum amount of payload held locally */
    int surplus; /* Overflow payload available for local storage */
    minLocal = pPage.minLocal;
    maxLocal = pPage.maxLocal;
    surplus = (int)( minLocal + ( nPayload - minLocal ) % ( pPage.pBt.usableSize - 4 ) );
    testcase( surplus == maxLocal );
    testcase( surplus == maxLocal + 1 );
    if ( surplus <= maxLocal )
    {
      pInfo.nLocal = (u16)surplus;
    }
    else
    {
      pInfo.nLocal = (u16)minLocal;
    }
    /* iOverflow is the in-cell offset of the 4-byte first-overflow-page
    ** number, which also accounts for the +4 in nSize. */
    pInfo.iOverflow = (u16)( pInfo.nLocal + n );
    pInfo.nSize = (u16)( pInfo.iOverflow + 4 );
  }
}
//#define parseCell(pPage, iCell, pInfo) \
// btreeParseCellPtr((pPage), findCell((pPage), (iCell)), (pInfo))
static void parseCell( MemPage pPage, int iCell, ref CellInfo pInfo )
{
  /* Resolve the cell index to its content offset, then parse it. */
  btreeParseCellPtr( pPage, findCell( pPage, iCell ), ref pInfo );
}
static void btreeParseCell(
MemPage pPage, /* Page containing the cell */
int iCell, /* The cell index. First cell is 0 */
ref CellInfo pInfo /* Fill in this structure */
)
{
  /* Index-based entry point; simply forwards to parseCell(). */
  parseCell( pPage, iCell, ref pInfo );
}
  1055. /*
  1056. ** Compute the total number of bytes that a Cell needs in the cell
  1057. ** data area of the btree-page. The return number includes the cell
  1058. ** data header and the local payload, but not any overflow page or
  1059. ** the space used by the cell pointer.
  1060. */
  1061. // Alternative form for C#
  1062. static u16 cellSizePtr( MemPage pPage, int iCell )
  1063. {
  1064. CellInfo info = new CellInfo();
  1065. byte[] pCell = new byte[13];
  1066. // Minimum Size = (2 bytes of Header or (4) Child Pointer) + (maximum of) 9 bytes data
  1067. if ( iCell < 0 )// Overflow Cell
  1068. Buffer.BlockCopy( pPage.aOvfl[-( iCell + 1 )].pCell, 0, pCell, 0, pCell.Length < pPage.aOvfl[-( iCell + 1 )].pCell.Length ? pCell.Length : pPage.aOvfl[-( iCell + 1 )].pCell.Length );
  1069. else if ( iCell >= pPage.aData.Length + 1 - pCell.Length )
  1070. Buffer.BlockCopy( pPage.aData, iCell, pCell, 0, pPage.aData.Length - iCell );
  1071. else
  1072. Buffer.BlockCopy( pPage.aData, iCell, pCell, 0, pCell.Length );
  1073. btreeParseCellPtr( pPage, pCell, ref info );
  1074. return info.nSize;
  1075. }
// Alternative form for C#
static u16 cellSizePtr( MemPage pPage, byte[] pCell, int offset )
{
  /* Compute the cell size when the cell begins at pCell[offset].  The
  ** tail of pCell is copied into a freshly allocated buffer so the
  ** parser can read the cell starting at index 0. */
  CellInfo info = new CellInfo();
  info.pCell = sqlite3Malloc( pCell.Length );
  Buffer.BlockCopy( pCell, offset, info.pCell, 0, pCell.Length - offset );
  btreeParseCellPtr( pPage, info.pCell, ref info );
  return info.nSize;
}
  1085. static u16 cellSizePtr( MemPage pPage, u8[] pCell )
  1086. {
  1087. int _pIter = pPage.childPtrSize; //u8 pIter = &pCell[pPage.childPtrSize];
  1088. u32 nSize = 0;
  1089. #if SQLITE_DEBUG || DEBUG
  1090. /* The value returned by this function should always be the same as
  1091. ** the (CellInfo.nSize) value found by doing a full parse of the
  1092. ** cell. If SQLITE_DEBUG is defined, an Debug.Assert() at the bottom of
  1093. ** this function verifies that this invariant is not violated. */
  1094. CellInfo debuginfo = new CellInfo();
  1095. btreeParseCellPtr( pPage, pCell, ref debuginfo );
  1096. #else
  1097. CellInfo debuginfo = new CellInfo();
  1098. #endif
  1099. if ( pPage.intKey != 0 )
  1100. {
  1101. int pEnd;
  1102. if ( pPage.hasData != 0 )
  1103. {
  1104. _pIter += getVarint32( pCell, out nSize );// pIter += getVarint32( pIter, out nSize );
  1105. }
  1106. else
  1107. {
  1108. nSize = 0;
  1109. }
  1110. /* pIter now points at the 64-bit integer key value, a variable length
  1111. ** integer. The following block moves pIter to point at the first byte
  1112. ** past the end of the key value. */
  1113. pEnd = _pIter + 9;//pEnd = &pIter[9];
  1114. while ( ( ( pCell[_pIter++] ) & 0x80 ) != 0 && _pIter < pEnd )
  1115. ;//while( (pIter++)&0x80 && pIter<pEnd );
  1116. }
  1117. else
  1118. {
  1119. _pIter += getVarint32( pCell, _pIter, out nSize ); //pIter += getVarint32( pIter, out nSize );
  1120. }
  1121. testcase( nSize == pPage.maxLocal );
  1122. testcase( nSize == pPage.maxLocal + 1 );
  1123. if ( nSize > pPage.maxLocal )
  1124. {
  1125. int minLocal = pPage.minLocal;
  1126. nSize = (u32)( minLocal + ( nSize - minLocal ) % ( pPage.pBt.usableSize - 4 ) );
  1127. testcase( nSize == pPage.maxLocal );
  1128. testcase( nSize == pPage.maxLocal + 1 );
  1129. if ( nSize > pPage.maxLocal )
  1130. {
  1131. nSize = (u32)minLocal;
  1132. }
  1133. nSize += 4;
  1134. }
  1135. nSize += (uint)_pIter;//nSize += (u32)(pIter - pCell);
  1136. /* The minimum size of any cell is 4 bytes. */
  1137. if ( nSize < 4 )
  1138. {
  1139. nSize = 4;
  1140. }
  1141. Debug.Assert( nSize == debuginfo.nSize );
  1142. return (u16)nSize;
  1143. }
#if SQLITE_DEBUG
/* This variation on cellSizePtr() is used inside of assert() statements
** only. */
static u16 cellSize( MemPage pPage, int iCell )
{
return cellSizePtr( pPage, findCell( pPage, iCell ) );
}
#else
/* Non-debug stub; returns a -1 sentinel and is never meaningfully used.
** NOTE(review): return type is int here but u16 in the debug variant —
** confirm no caller depends on the signature outside asserts. */
static int cellSize(MemPage pPage, int iCell) { return -1; }
#endif
  1154. #if !SQLITE_OMIT_AUTOVACUUM
  1155. /*
  1156. ** If the cell pCell, part of page pPage contains a pointer
  1157. ** to an overflow page, insert an entry into the pointer-map
  1158. ** for the overflow page.
  1159. */
  1160. static void ptrmapPutOvflPtr( MemPage pPage, int pCell, ref int pRC )
  1161. {
  1162. if ( pRC != 0 )
  1163. return;
  1164. CellInfo info = new CellInfo();
  1165. Debug.Assert( pCell != 0 );
  1166. btreeParseCellPtr( pPage, pCell, ref info );
  1167. Debug.Assert( ( info.nData + ( pPage.intKey != 0 ? 0 : info.nKey ) ) == info.nPayload );
  1168. if ( info.iOverflow != 0 )
  1169. {
  1170. Pgno ovfl = sqlite3Get4byte( pPage.aData, pCell, info.iOverflow );
  1171. ptrmapPut( pPage.pBt, ovfl, PTRMAP_OVERFLOW1, pPage.pgno, ref pRC );
  1172. }
  1173. }
  1174. static void ptrmapPutOvflPtr( MemPage pPage, u8[] pCell, ref int pRC )
  1175. {
  1176. if ( pRC != 0 )
  1177. return;
  1178. CellInfo info = new CellInfo();
  1179. Debug.Assert( pCell != null );
  1180. btreeParseCellPtr( pPage, pCell, ref info );
  1181. Debug.Assert( ( info.nData + ( pPage.intKey != 0 ? 0 : info.nKey ) ) == info.nPayload );
  1182. if ( info.iOverflow != 0 )
  1183. {
  1184. Pgno ovfl = sqlite3Get4byte( pCell, info.iOverflow );
  1185. ptrmapPut( pPage.pBt, ovfl, PTRMAP_OVERFLOW1, pPage.pgno, ref pRC );
  1186. }
  1187. }
  1188. #endif
  1189. /*
  1190. ** Defragment the page given. All Cells are moved to the
  1191. ** end of the page and all free space is collected into one
  1192. ** big FreeBlk that occurs in between the header and cell
  1193. ** pointer array and the cell content area.
  1194. */
  1195. static int defragmentPage( MemPage pPage )
  1196. {
  1197. int i; /* Loop counter */
  1198. int pc; /* Address of a i-th cell */
  1199. int addr; /* Offset of first byte after cell pointer array */
  1200. int hdr; /* Offset to the page header */
  1201. int size; /* Size of a cell */
  1202. int usableSize; /* Number of usable bytes on a page */
  1203. int cellOffset; /* Offset to the cell pointer array */
  1204. int cbrk; /* Offset to the cell content area */
  1205. int nCell; /* Number of cells on the page */
  1206. byte[] data; /* The page data */
  1207. byte[] temp; /* Temp area for cell content */
  1208. int iCellFirst; /* First allowable cell index */
  1209. int iCellLast; /* Last possible cell index */
  1210. Debug.Assert( sqlite3PagerIswriteable( pPage.pDbPage ) );
  1211. Debug.Assert( pPage.pBt != null );
  1212. Debug.Assert( pPage.pBt.usableSize <= SQLITE_MAX_PAGE_SIZE );
  1213. Debug.Assert( pPage.nOverflow == 0 );
  1214. Debug.Assert( sqlite3_mutex_held( pPage.pBt.mutex ) );
  1215. temp = sqlite3PagerTempSpace( pPage.pBt.pPager );
  1216. data = pPage.aData;
  1217. hdr = pPage.hdrOffset;
  1218. cellOffset = pPage.cellOffset;
  1219. nCell = pPage.nCell;
  1220. Debug.Assert( nCell == get2byte( data, hdr + 3 ) );
  1221. usableSize = (int)pPage.pBt.usableSize;
  1222. cbrk = get2byte( data, hdr + 5 );
  1223. Buffer.BlockCopy( data, cbrk, temp, cbrk, usableSize - cbrk );//memcpy( temp[cbrk], ref data[cbrk], usableSize - cbrk );
  1224. cbrk = usableSize;
  1225. iCellFirst = cellOffset + 2 * nCell;
  1226. iCellLast = usableSize - 4;
  1227. for ( i = 0; i < nCell; i++ )
  1228. {
  1229. int pAddr; /* The i-th cell pointer */
  1230. pAddr = cellOffset + i * 2; // &data[cellOffset + i * 2];
  1231. pc = get2byte( data, pAddr );
  1232. testcase( pc == iCellFirst );
  1233. testcase( pc == iCellLast );
  1234. #if !(SQLITE_ENABLE_OVERSIZE_CELL_CHECK)
  1235. /* These conditions have already been verified in btreeInitPage()
  1236. ** if SQLITE_ENABLE_OVERSIZE_CELL_CHECK is defined
  1237. */
  1238. if ( pc < iCellFirst || pc > iCellLast )
  1239. {
  1240. return SQLITE_CORRUPT_BKPT();
  1241. }
  1242. #endif
  1243. Debug.Assert( pc >= iCellFirst && pc <= iCellLast );
  1244. size = cellSizePtr( pPage, temp, pc );
  1245. cbrk -= size;
  1246. #if (SQLITE_ENABLE_OVERSIZE_CELL_CHECK)
  1247. if ( cbrk < iCellFirst || pc + size > usableSize )
  1248. {
  1249. return SQLITE_CORRUPT_BKPT();
  1250. }
  1251. #else
  1252. if ( cbrk < iCellFirst || pc + size > usableSize )
  1253. {
  1254. return SQLITE_CORRUPT_BKPT();
  1255. }
  1256. #endif
  1257. Debug.Assert( cbrk + size <= usableSize && cbrk >= iCellFirst );
  1258. testcase( cbrk + size == usableSize );
  1259. testcase( pc + size == usableSize );
  1260. Buffer.BlockCopy( temp, pc, data, cbrk, size );//memcpy(data[cbrk], ref temp[pc], size);
  1261. put2byte( data, pAddr, cbrk );
  1262. }
  1263. Debug.Assert( cbrk >= iCellFirst );
  1264. put2byte( data, hdr + 5, cbrk );
  1265. data[hdr + 1] = 0;
  1266. data[hdr + 2] = 0;
  1267. data[hdr + 7] = 0;
  1268. addr = cellOffset + 2 * nCell;
  1269. Array.Clear( data, addr, cbrk - addr ); //memset(data[iCellFirst], 0, cbrk-iCellFirst);
  1270. Debug.Assert( sqlite3PagerIswriteable( pPage.pDbPage ) );
  1271. if ( cbrk - iCellFirst != pPage.nFree )
  1272. {
  1273. return SQLITE_CORRUPT_BKPT();
  1274. }
  1275. return SQLITE_OK;
  1276. }
  1277. /*
  1278. ** Allocate nByte bytes of space from within the B-Tree page passed
  1279. ** as the first argument. Write into pIdx the index into pPage.aData[]
  1280. ** of the first byte of allocated space. Return either SQLITE_OK or
  1281. ** an error code (usually SQLITE_CORRUPT).
  1282. **
  1283. ** The caller guarantees that there is sufficient space to make the
  1284. ** allocation. This routine might need to defragment in order to bring
  1285. ** all the space together, however. This routine will avoid using
  1286. ** the first two bytes past the cell pointer area since presumably this
  1287. ** allocation is being made in order to insert a new cell, so we will
  1288. ** also end up needing a new cell pointer.
  1289. */
static int allocateSpace( MemPage pPage, int nByte, ref int pIdx )
{
  /* Allocate nByte bytes from pPage, writing the offset of the allocated
  ** space into pIdx.  May defragment the page.  Returns SQLITE_OK or an
  ** error code (usually SQLITE_CORRUPT). */
  int hdr = pPage.hdrOffset; /* Local cache of pPage.hdrOffset */
  u8[] data = pPage.aData; /* Local cache of pPage.aData */
  int nFrag; /* Number of fragmented bytes on pPage */
  int top; /* First byte of cell content area */
  int gap; /* First byte of gap between cell pointers and cell content */
  int rc; /* Integer return code */
  u32 usableSize; /* Usable size of the page */
  Debug.Assert( sqlite3PagerIswriteable( pPage.pDbPage ) );
  Debug.Assert( pPage.pBt != null );
  Debug.Assert( sqlite3_mutex_held( pPage.pBt.mutex ) );
  Debug.Assert( nByte >= 0 ); /* Minimum cell size is 4 */
  Debug.Assert( pPage.nFree >= nByte );
  Debug.Assert( pPage.nOverflow == 0 );
  usableSize = pPage.pBt.usableSize;
  Debug.Assert( nByte < usableSize - 8 );
  nFrag = data[hdr + 7]; /* header byte 7 counts fragmented free bytes */
  Debug.Assert( pPage.cellOffset == hdr + 12 - 4 * pPage.leaf );
  gap = pPage.cellOffset + 2 * pPage.nCell;
  top = get2byteNotZero( data, hdr + 5 );
  /* The gap (end of cell pointers) must not overlap the content area. */
  if ( gap > top )
    return SQLITE_CORRUPT_BKPT();
  testcase( gap + 2 == top );
  testcase( gap + 1 == top );
  testcase( gap == top );
  if ( nFrag >= 60 )
  {
    /* Always defragment highly fragmented pages */
    rc = defragmentPage( pPage );
    if ( rc != 0 )
      return rc;
    top = get2byteNotZero( data, hdr + 5 );
  }
  else if ( gap + 2 <= top )
  {
    /* Search the freelist looking for a free slot big enough to satisfy
    ** the request. The allocation is made from the first free slot in
    ** the list that is large enough to accomadate it.
    */
    int pc, addr;
    for ( addr = hdr + 1; ( pc = get2byte( data, addr ) ) > 0; addr = pc )
    {
      int size; /* Size of free slot */
      /* Freeblock offsets must stay in-bounds and strictly increase. */
      if ( pc > usableSize - 4 || pc < addr + 4 )
      {
        return SQLITE_CORRUPT_BKPT();
      }
      size = get2byte( data, pc + 2 );
      if ( size >= nByte )
      {
        int x = size - nByte;
        testcase( x == 4 );
        testcase( x == 3 );
        if ( x < 4 )
        {
          /* Remove the slot from the free-list. Update the number of
          ** fragmented bytes within the page. */
          data[addr + 0] = data[pc + 0];
          data[addr + 1] = data[pc + 1]; //memcpy( data[addr], ref data[pc], 2 );
          data[hdr + 7] = (u8)( nFrag + x );
        }
        else if ( size + pc > usableSize )
        {
          return SQLITE_CORRUPT_BKPT();
        }
        else
        {
          /* The slot remains on the free-list. Reduce its size to account
          ** for the portion used by the new allocation. */
          put2byte( data, pc + 2, x );
        }
        /* Allocate from the tail of the slot. */
        pIdx = pc + x;
        return SQLITE_OK;
      }
    }
  }
  /* Check to make sure there is enough space in the gap to satisfy
  ** the allocation. If not, defragment.
  */
  testcase( gap + 2 + nByte == top );
  if ( gap + 2 + nByte > top )
  {
    rc = defragmentPage( pPage );
    if ( rc != 0 )
      return rc;
    top = get2byteNotZero( data, hdr + 5 );
    Debug.Assert( gap + nByte <= top );
  }
  /* Allocate memory from the gap in between the cell pointer array
  ** and the cell content area. The btreeInitPage() call has already
  ** validated the freelist. Given that the freelist is valid, there
  ** is no way that the allocation can extend off the end of the page.
  ** The Debug.Assert() below verifies the previous sentence.
  */
  top -= nByte;
  put2byte( data, hdr + 5, top );
  Debug.Assert( top + nByte <= (int)pPage.pBt.usableSize );
  pIdx = top;
  return SQLITE_OK;
}
  1391. /*
  1392. ** Return a section of the pPage.aData to the freelist.
  1393. ** The first byte of the new free block is pPage.aDisk[start]
  1394. ** and the size of the block is "size" bytes.
  1395. **
  1396. ** Most of the effort here is involved in coalesing adjacent
  1397. ** free blocks into a single big free block.
  1398. */
static int freeSpace( MemPage pPage, u32 start, int size )
{
  /* Unsigned-offset convenience overload; delegates to the int version. */
  return freeSpace( pPage, (int)start, size );
}
  1403. static int freeSpace( MemPage pPage, int start, int size )
  1404. {
  1405. int addr, pbegin, hdr;
  1406. int iLast; /* Largest possible freeblock offset */
  1407. byte[] data = pPage.aData;
  1408. Debug.Assert( pPage.pBt != null );
  1409. Debug.Assert( sqlite3PagerIswriteable( pPage.pDbPage ) );
  1410. Debug.Assert( start >= pPage.hdrOffset + 6 + pPage.childPtrSize );
  1411. Debug.Assert( ( start + size ) <= (int)pPage.pBt.usableSize );
  1412. Debug.Assert( sqlite3_mutex_held( pPage.pBt.mutex ) );
  1413. Debug.Assert( size >= 0 ); /* Minimum cell size is 4 */
  1414. if ( pPage.pBt.secureDelete )
  1415. {
  1416. /* Overwrite deleted information with zeros when the secure_delete
  1417. ** option is enabled */
  1418. Array.Clear( data, start, size );// memset(&data[start], 0, size);
  1419. }
  1420. /* Add the space back into the linked list of freeblocks. Note that
  1421. ** even though the freeblock list was checked by btreeInitPage(),
  1422. ** btreeInitPage() did not detect overlapping cells or
  1423. ** freeblocks that overlapped cells. Nor does it detect when the
  1424. ** cell content area exceeds the value in the page header. If these
  1425. ** situations arise, then subsequent insert operations might corrupt
  1426. ** the freelist. So we do need to check for corruption while scanning
  1427. ** the freelist.
  1428. */
  1429. hdr = pPage.hdrOffset;
  1430. addr = hdr + 1;
  1431. iLast = (int)pPage.pBt.usableSize - 4;
  1432. Debug.Assert( start <= iLast );
  1433. while ( ( pbegin = get2byte( data, addr ) ) < start && pbegin > 0 )
  1434. {
  1435. if ( pbegin < addr + 4 )
  1436. {
  1437. return SQLITE_CORRUPT_BKPT();
  1438. }
  1439. addr = pbegin;
  1440. }
  1441. if ( pbegin > iLast )
  1442. {
  1443. return SQLITE_CORRUPT_BKPT();
  1444. }
  1445. Debug.Assert( pbegin > addr || pbegin == 0 );
  1446. put2byte( data, addr, start );
  1447. put2byte( data, start, pbegin );
  1448. put2byte( data, start + 2, size );
  1449. pPage.nFree = (u16)( pPage.nFree + size );
  1450. /* Coalesce adjacent free blocks */
  1451. addr = hdr + 1;
  1452. while ( ( pbegin = get2byte( data, addr ) ) > 0 )
  1453. {
  1454. int pnext, psize, x;
  1455. Debug.Assert( pbegin > addr );
  1456. Debug.Assert( pbegin <= (int)pPage.pBt.usableSize - 4 );
  1457. pnext = get2byte( data, pbegin );
  1458. psize = get2byte( data, pbegin + 2 );
  1459. if ( pbe

Large files files are truncated, but you can click here to view the full file