PageRenderTime 33ms CodeModel.GetById 20ms RepoModel.GetById 0ms app.codeStats 0ms

/indra/llrender/llvertexbuffer.cpp

https://bitbucket.org/lindenlab/viewer-beta/
C++ | 2254 lines | 1845 code | 307 blank | 102 comment | 357 complexity | 0dbb5096932ea519ba0cd541fb4619fa MD5 | raw file
Possible License(s): LGPL-2.1
  1. /**
  2. * @file llvertexbuffer.cpp
  3. * @brief LLVertexBuffer implementation
  4. *
  5. * $LicenseInfo:firstyear=2003&license=viewerlgpl$
  6. * Second Life Viewer Source Code
  7. * Copyright (C) 2010, Linden Research, Inc.
  8. *
  9. * This library is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation;
  12. * version 2.1 of the License only.
  13. *
  14. * This library is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with this library; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. *
  23. * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
  24. * $/LicenseInfo$
  25. */
  26. #include "linden_common.h"
  27. #include <boost/static_assert.hpp>
  28. #include "llsys.h"
  29. #include "llvertexbuffer.h"
  30. // #include "llrender.h"
  31. #include "llglheaders.h"
  32. #include "llmemtype.h"
  33. #include "llrender.h"
  34. #include "llvector4a.h"
  35. #include "llshadermgr.h"
  36. #include "llglslshader.h"
  37. #include "llmemory.h"
  38. //Next Highest Power Of Two
  39. //helper function, returns first number > v that is a power of 2, or v if v is already a power of 2
  40. U32 nhpo2(U32 v)
  41. {
  42. U32 r = 1;
  43. while (r < v) {
  44. r *= 2;
  45. }
  46. return r;
  47. }
//============================================================================

// Static state shared by all vertex buffers.
// Pools are partitioned by GL usage hint (stream vs. dynamic) and by
// buffer binding target (vertex vs. index); see initClass().
//static
LLVBOPool LLVertexBuffer::sStreamVBOPool;
LLVBOPool LLVertexBuffer::sDynamicVBOPool;
LLVBOPool LLVertexBuffer::sStreamIBOPool;
LLVBOPool LLVertexBuffer::sDynamicIBOPool;

U32 LLVBOPool::sBytesPooled = 0;							// total bytes currently sitting in free lists

LLPrivateMemoryPool* LLVertexBuffer::sPrivatePoolp = NULL ;	// backing pool for client-memory buffers
U32 LLVertexBuffer::sBindCount = 0;							// stat: buffer bind calls
U32 LLVertexBuffer::sSetCount = 0;							// stat: vertex pointer setups
S32 LLVertexBuffer::sCount = 0;								// stat: live LLVertexBuffer instances
S32 LLVertexBuffer::sGLCount = 0;							// stat: live GL buffer objects
S32 LLVertexBuffer::sMappedCount = 0;						// stat: currently mapped buffers
BOOL LLVertexBuffer::sDisableVBOMapping = FALSE ;			// when TRUE, shadow VBOs in client memory instead of mapping
BOOL LLVertexBuffer::sEnableVBOs = TRUE;					// global VBO on/off switch (see initClass)
U32 LLVertexBuffer::sGLRenderBuffer = 0;					// GL name of currently bound vertex buffer
U32 LLVertexBuffer::sGLRenderArray = 0;						// GL name of currently bound vertex array object
U32 LLVertexBuffer::sGLRenderIndices = 0;					// GL name of currently bound index buffer
U32 LLVertexBuffer::sLastMask = 0;							// last attribute mask passed to setupClientArrays()
BOOL LLVertexBuffer::sVBOActive = FALSE;					// TRUE while a vertex buffer is bound
BOOL LLVertexBuffer::sIBOActive = FALSE;					// TRUE while an index buffer is bound
U32 LLVertexBuffer::sAllocatedBytes = 0;					// total bytes of GL buffer storage allocated
BOOL LLVertexBuffer::sMapped = FALSE;
BOOL LLVertexBuffer::sUseStreamDraw = TRUE;
BOOL LLVertexBuffer::sUseVAO = FALSE;
BOOL LLVertexBuffer::sPreferStreamDraw = FALSE;

// per-iteration timeout slice for glClientWaitSync
// (10,000 ns = 10 microseconds; the previous "1 ms" comment was wrong)
const U32 FENCE_WAIT_TIME_NANOSECONDS = 10000;
// GPU sync fence built on GL_ARB_sync (glFenceSync / glClientWaitSync).
// When GL_ARB_sync is not available at compile time, every operation
// compiles to a no-op.
class LLGLSyncFence : public LLGLFence
{
public:
#ifdef GL_ARB_sync
	GLsync mSync;	// current fence object, 0 when no fence is outstanding
#endif

	LLGLSyncFence()
	{
#ifdef GL_ARB_sync
		mSync = 0;
#endif
	}

	virtual ~LLGLSyncFence()
	{
#ifdef GL_ARB_sync
		if (mSync)
		{
			glDeleteSync(mSync);
		}
#endif
	}

	// Drop any previous fence and insert a new one into the GL command
	// stream; it signals when all prior GPU commands complete.
	void placeFence()
	{
#ifdef GL_ARB_sync
		if (mSync)
		{
			glDeleteSync(mSync);
		}
		mSync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
#endif
	}

	// Block the CPU (in FENCE_WAIT_TIME_NANOSECONDS slices) until the
	// outstanding fence signals. No-op when no fence has been placed.
	void wait()
	{
#ifdef GL_ARB_sync
		if (mSync)
		{
			while (glClientWaitSync(mSync, 0, FENCE_WAIT_TIME_NANOSECONDS) == GL_TIMEOUT_EXPIRED)
			{ //track the number of times we've waited here
				static S32 waits = 0;
				waits++;
			}
		}
#endif
	}
};
  120. //which power of 2 is i?
  121. //assumes i is a power of 2 > 0
  122. U32 wpo2(U32 i)
  123. {
  124. llassert(i > 0);
  125. llassert(nhpo2(i) == i);
  126. U32 r = 0;
  127. while (i >>= 1) ++r;
  128. return r;
  129. }
  130. U8* LLVBOPool::allocate(U32& name, U32 size)
  131. {
  132. llassert(nhpo2(size) == size);
  133. U32 i = wpo2(size);
  134. if (mFreeList.size() <= i)
  135. {
  136. mFreeList.resize(i+1);
  137. }
  138. U8* ret = NULL;
  139. if (mFreeList[i].empty())
  140. {
  141. //make a new buffer
  142. glGenBuffersARB(1, &name);
  143. glBindBufferARB(mType, name);
  144. glBufferDataARB(mType, size, 0, mUsage);
  145. LLVertexBuffer::sAllocatedBytes += size;
  146. if (LLVertexBuffer::sDisableVBOMapping)
  147. {
  148. ret = (U8*) ll_aligned_malloc_16(size);
  149. }
  150. glBindBufferARB(mType, 0);
  151. }
  152. else
  153. {
  154. name = mFreeList[i].front().mGLName;
  155. ret = mFreeList[i].front().mClientData;
  156. sBytesPooled -= size;
  157. mFreeList[i].pop_front();
  158. }
  159. return ret;
  160. }
  161. void LLVBOPool::release(U32 name, U8* buffer, U32 size)
  162. {
  163. llassert(nhpo2(size) == size);
  164. U32 i = wpo2(size);
  165. llassert(mFreeList.size() > i);
  166. Record rec;
  167. rec.mGLName = name;
  168. rec.mClientData = buffer;
  169. sBytesPooled += size;
  170. mFreeList[i].push_back(rec);
  171. }
  172. void LLVBOPool::cleanup()
  173. {
  174. U32 size = 1;
  175. for (U32 i = 0; i < mFreeList.size(); ++i)
  176. {
  177. record_list_t& l = mFreeList[i];
  178. while (!l.empty())
  179. {
  180. Record& r = l.front();
  181. glDeleteBuffersARB(1, &r.mGLName);
  182. if (r.mClientData)
  183. {
  184. ll_aligned_free_16(r.mClientData);
  185. }
  186. l.pop_front();
  187. LLVertexBuffer::sAllocatedBytes -= size;
  188. sBytesPooled -= size;
  189. }
  190. size *= 2;
  191. }
  192. }
//NOTE: each component must be AT LEAST 4 bytes in size to avoid a performance penalty on AMD hardware
// Per-attribute byte sizes; index order must match the TYPE_* enum.
S32 LLVertexBuffer::sTypeSize[LLVertexBuffer::TYPE_MAX] =
{
	sizeof(LLVector4), // TYPE_VERTEX,
	sizeof(LLVector4), // TYPE_NORMAL,
	sizeof(LLVector2), // TYPE_TEXCOORD0,
	sizeof(LLVector2), // TYPE_TEXCOORD1,
	sizeof(LLVector2), // TYPE_TEXCOORD2,
	sizeof(LLVector2), // TYPE_TEXCOORD3,
	sizeof(LLColor4U), // TYPE_COLOR,
	sizeof(LLColor4U), // TYPE_EMISSIVE, only alpha is used currently
	sizeof(LLVector4), // TYPE_BINORMAL,
	sizeof(F32),	   // TYPE_WEIGHT,
	sizeof(LLVector4), // TYPE_WEIGHT4,
	sizeof(LLVector4), // TYPE_CLOTHWEIGHT,
	sizeof(LLVector4), // TYPE_TEXTURE_INDEX (actually exists as position.w), no extra data, but stride is 16 bytes
};
// Translation table from LLRender draw-mode indices to GL primitive enums.
// NOTE(review): entry order must stay in sync with LLRender's mode enum
// (NUM_MODES entries) -- confirm against llrender.h before reordering.
U32 LLVertexBuffer::sGLMode[LLRender::NUM_MODES] =
{
	GL_TRIANGLES,
	GL_TRIANGLE_STRIP,
	GL_TRIANGLE_FAN,
	GL_POINTS,
	GL_LINES,
	GL_LINE_STRIP,
	GL_QUADS,
	GL_LINE_LOOP,
};
//static
// Reconcile GL enabled-array state with 'data_mask' (a bitmask of MAP_*
// flags). Only the delta against the previously applied mask (sLastMask) is
// sent to GL. Two regimes:
//  - shader pipeline: toggle generic vertex attribute arrays, one per TYPE_*
//  - fixed function: toggle classic client states, plus per-texture-unit
//    texcoord arrays for TEXCOORD1..3 and the binormal (texture unit 2)
void LLVertexBuffer::setupClientArrays(U32 data_mask)
{
	if (sLastMask != data_mask)
	{
		// NOTE(review): 'error' is set in the gDebugSession paths but never
		// read afterwards -- presumably leftover debug plumbing; confirm.
		BOOL error = FALSE;

		if (LLGLSLShader::sNoFixedFunction)
		{
			//shader pipeline: attribute location == TYPE_* index
			for (U32 i = 0; i < TYPE_MAX; ++i)
			{
				S32 loc = i;

				U32 mask = 1 << i;

				if (sLastMask & (1 << i))
				{ //was enabled
					if (!(data_mask & mask))
					{ //needs to be disabled
						glDisableVertexAttribArrayARB(loc);
					}
				}
				else
				{	//was disabled
					if (data_mask & mask)
					{ //needs to be enabled
						glEnableVertexAttribArrayARB(loc);
					}
				}
			}
		}
		else
		{
			//fixed-function pipeline: classic client states, index-aligned
			//with the mask[] table below
			GLenum array[] =
			{
				GL_VERTEX_ARRAY,
				GL_NORMAL_ARRAY,
				GL_TEXTURE_COORD_ARRAY,
				GL_COLOR_ARRAY,
			};

			GLenum mask[] =
			{
				MAP_VERTEX,
				MAP_NORMAL,
				MAP_TEXCOORD0,
				MAP_COLOR
			};

			for (U32 i = 0; i < 4; ++i)
			{
				if (sLastMask & mask[i])
				{ //was enabled
					if (!(data_mask & mask[i]))
					{ //needs to be disabled
						glDisableClientState(array[i]);
					}
					else if (gDebugGL)
					{ //needs to be enabled, make sure it was (DEBUG)
						if (!glIsEnabled(array[i]))
						{
							if (gDebugSession)
							{
								error = TRUE;
								gFailLog << "Bad client state! " << array[i] << " disabled." << std::endl;
							}
							else
							{
								llerrs << "Bad client state! " << array[i] << " disabled." << llendl;
							}
						}
					}
				}
				else
				{	//was disabled
					if (data_mask & mask[i])
					{ //needs to be enabled
						glEnableClientState(array[i]);
					}
					else if (gDebugGL && glIsEnabled(array[i]))
					{ //needs to be disabled, make sure it was (DEBUG TEMPORARY)
						if (gDebugSession)
						{
							error = TRUE;
							gFailLog << "Bad client state! " << array[i] << " enabled." << std::endl;
						}
						else
						{
							llerrs << "Bad client state! " << array[i] << " enabled." << llendl;
						}
					}
				}
			}

			//TEXCOORD1..3 live on texture units 1..3; each toggle must
			//select the unit first and restore unit 0 afterwards
			U32 map_tc[] =
			{
				MAP_TEXCOORD1,
				MAP_TEXCOORD2,
				MAP_TEXCOORD3
			};

			for (U32 i = 0; i < 3; i++)
			{
				if (sLastMask & map_tc[i])
				{
					if (!(data_mask & map_tc[i]))
					{ //disable
						glClientActiveTextureARB(GL_TEXTURE1_ARB+i);
						glDisableClientState(GL_TEXTURE_COORD_ARRAY);
						glClientActiveTextureARB(GL_TEXTURE0_ARB);
					}
				}
				else if (data_mask & map_tc[i])
				{
					glClientActiveTextureARB(GL_TEXTURE1_ARB+i);
					glEnableClientState(GL_TEXTURE_COORD_ARRAY);
					glClientActiveTextureARB(GL_TEXTURE0_ARB);
				}
			}

			//binormals ride on texture unit 2's texcoord array in the
			//fixed-function path
			if (sLastMask & MAP_BINORMAL)
			{
				if (!(data_mask & MAP_BINORMAL))
				{
					glClientActiveTextureARB(GL_TEXTURE2_ARB);
					glDisableClientState(GL_TEXTURE_COORD_ARRAY);
					glClientActiveTextureARB(GL_TEXTURE0_ARB);
				}
			}
			else if (data_mask & MAP_BINORMAL)
			{
				glClientActiveTextureARB(GL_TEXTURE2_ARB);
				glEnableClientState(GL_TEXTURE_COORD_ARRAY);
				glClientActiveTextureARB(GL_TEXTURE0_ARB);
			}
		}

		sLastMask = data_mask;
	}
}
//static
// Immediate-style helper: draw positions/normals supplied as client-side
// vectors, without an LLVertexBuffer object. Requires at least one vertex
// and norm.size() >= pos.size().
void LLVertexBuffer::drawArrays(U32 mode, const std::vector<LLVector3>& pos, const std::vector<LLVector3>& norm)
{
	llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);

	gGL.syncMatrices();

	U32 count = pos.size();
	llassert_always(norm.size() >= pos.size());
	llassert_always(count > 0) ;

	unbind();

	setupClientArrays(MAP_VERTEX | MAP_NORMAL);

	LLGLSLShader* shader = LLGLSLShader::sCurBoundShaderPtr;

	if (shader)
	{
		//shader path: generic attributes at fixed TYPE_* locations
		S32 loc = LLVertexBuffer::TYPE_VERTEX;
		if (loc > -1)
		{
			glVertexAttribPointerARB(loc, 3, GL_FLOAT, GL_FALSE, 0, pos[0].mV);
		}
		loc = LLVertexBuffer::TYPE_NORMAL;
		if (loc > -1)
		{
			glVertexAttribPointerARB(loc, 3, GL_FLOAT, GL_FALSE, 0, norm[0].mV);
		}
	}
	else
	{
		//fixed-function client arrays
		glVertexPointer(3, GL_FLOAT, 0, pos[0].mV);
		glNormalPointer(GL_FLOAT, 0, norm[0].mV);
	}

	glDrawArrays(sGLMode[mode], 0, count);
}
  383. //static
  384. void LLVertexBuffer::drawElements(U32 mode, const LLVector4a* pos, const LLVector2* tc, S32 num_indices, const U16* indicesp)
  385. {
  386. llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);
  387. gGL.syncMatrices();
  388. U32 mask = LLVertexBuffer::MAP_VERTEX;
  389. if (tc)
  390. {
  391. mask = mask | LLVertexBuffer::MAP_TEXCOORD0;
  392. }
  393. unbind();
  394. setupClientArrays(mask);
  395. if (LLGLSLShader::sNoFixedFunction)
  396. {
  397. S32 loc = LLVertexBuffer::TYPE_VERTEX;
  398. glVertexAttribPointerARB(loc, 3, GL_FLOAT, GL_FALSE, 16, pos);
  399. if (tc)
  400. {
  401. loc = LLVertexBuffer::TYPE_TEXCOORD0;
  402. glVertexAttribPointerARB(loc, 2, GL_FLOAT, GL_FALSE, 0, tc);
  403. }
  404. }
  405. else
  406. {
  407. glTexCoordPointer(2, GL_FLOAT, 0, tc);
  408. glVertexPointer(3, GL_FLOAT, 16, pos);
  409. }
  410. glDrawElements(sGLMode[mode], num_indices, GL_UNSIGNED_SHORT, indicesp);
  411. }
  412. void LLVertexBuffer::validateRange(U32 start, U32 end, U32 count, U32 indices_offset) const
  413. {
  414. if (start >= (U32) mNumVerts ||
  415. end >= (U32) mNumVerts)
  416. {
  417. llerrs << "Bad vertex buffer draw range: [" << start << ", " << end << "] vs " << mNumVerts << llendl;
  418. }
  419. llassert(mNumIndices >= 0);
  420. if (indices_offset >= (U32) mNumIndices ||
  421. indices_offset + count > (U32) mNumIndices)
  422. {
  423. llerrs << "Bad index buffer draw range: [" << indices_offset << ", " << indices_offset+count << "]" << llendl;
  424. }
  425. if (gDebugGL && !useVBOs())
  426. {
  427. U16* idx = ((U16*) getIndicesPointer())+indices_offset;
  428. for (U32 i = 0; i < count; ++i)
  429. {
  430. if (idx[i] < start || idx[i] > end)
  431. {
  432. llerrs << "Index out of range: " << idx[i] << " not in [" << start << ", " << end << "]" << llendl;
  433. }
  434. }
  435. LLGLSLShader* shader = LLGLSLShader::sCurBoundShaderPtr;
  436. if (shader && shader->mFeatures.mIndexedTextureChannels > 1)
  437. {
  438. LLStrider<LLVector4a> v;
  439. //hack to get non-const reference
  440. LLVertexBuffer* vb = (LLVertexBuffer*) this;
  441. vb->getVertexStrider(v);
  442. for (U32 i = start; i < end; i++)
  443. {
  444. S32 idx = (S32) (v[i][3]+0.25f);
  445. if (idx < 0 || idx >= shader->mFeatures.mIndexedTextureChannels)
  446. {
  447. llerrs << "Bad texture index found in vertex data stream." << llendl;
  448. }
  449. }
  450. }
  451. }
  452. }
// Indexed draw over a validated subrange of this buffer. Verifies that the
// correct VAO / VBO / IBO are bound (fatal llerrs otherwise) before issuing
// glDrawRangeElements.
void LLVertexBuffer::drawRange(U32 mode, U32 start, U32 end, U32 count, U32 indices_offset) const
{
	validateRange(start, end, count, indices_offset);

	gGL.syncMatrices();

	llassert(mNumVerts >= 0);
	llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);

	if (mGLArray)
	{	//VAO path: only the array object binding matters
		if (mGLArray != sGLRenderArray)
		{
			llerrs << "Wrong vertex array bound." << llendl;
		}
	}
	else
	{	//non-VAO path: both buffer bindings must match this object
		if (mGLIndices != sGLRenderIndices)
		{
			llerrs << "Wrong index buffer bound." << llendl;
		}

		if (mGLBuffer != sGLRenderBuffer)
		{
			llerrs << "Wrong vertex buffer bound." << llendl;
		}
	}

	if (gDebugGL && !mGLArray && useVBOs())
	{	//debug: ask GL directly which element buffer is bound
		GLint elem = 0;
		glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &elem);

		if (elem != mGLIndices)
		{
			llerrs << "Wrong index buffer bound!" << llendl;
		}
	}

	if (mode >= LLRender::NUM_MODES)
	{
		llerrs << "Invalid draw mode: " << mode << llendl;
		return;
	}

	//getIndicesPointer() is a GL offset for VBOs or a real pointer for
	//client-memory buffers
	U16* idx = ((U16*) getIndicesPointer())+indices_offset;

	stop_glerror();
	glDrawRangeElements(sGLMode[mode], start, end, count, GL_UNSIGNED_SHORT,
		idx);
	stop_glerror();

	placeFence();
}
// Indexed draw of 'count' indices starting at 'indices_offset'. Same
// binding checks as drawRange(), but no per-vertex range validation.
void LLVertexBuffer::draw(U32 mode, U32 count, U32 indices_offset) const
{
	llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);

	gGL.syncMatrices();

	llassert(mNumIndices >= 0);
	if (indices_offset >= (U32) mNumIndices ||
	    indices_offset + count > (U32) mNumIndices)
	{
		llerrs << "Bad index buffer draw range: [" << indices_offset << ", " << indices_offset+count << "]" << llendl;
	}

	if (mGLArray)
	{	//VAO path
		if (mGLArray != sGLRenderArray)
		{
			llerrs << "Wrong vertex array bound." << llendl;
		}
	}
	else
	{	//non-VAO path: both buffer bindings must match
		if (mGLIndices != sGLRenderIndices)
		{
			llerrs << "Wrong index buffer bound." << llendl;
		}

		if (mGLBuffer != sGLRenderBuffer)
		{
			llerrs << "Wrong vertex buffer bound." << llendl;
		}
	}

	if (mode >= LLRender::NUM_MODES)
	{
		llerrs << "Invalid draw mode: " << mode << llendl;
		return;
	}

	stop_glerror();
	glDrawElements(sGLMode[mode], count, GL_UNSIGNED_SHORT,
		((U16*) getIndicesPointer()) + indices_offset);
	stop_glerror();
	placeFence();
}
// Non-indexed draw of 'count' vertices starting at 'first'. Validates the
// range against mNumVerts and verifies the expected buffer/VAO bindings.
void LLVertexBuffer::drawArrays(U32 mode, U32 first, U32 count) const
{
	llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);

	gGL.syncMatrices();

	llassert(mNumVerts >= 0);
	if (first >= (U32) mNumVerts ||
	    first + count > (U32) mNumVerts)
	{
		llerrs << "Bad vertex buffer draw range: [" << first << ", " << first+count << "]" << llendl;
	}

	if (mGLArray)
	{	//VAO path
		if (mGLArray != sGLRenderArray)
		{
			llerrs << "Wrong vertex array bound." << llendl;
		}
	}
	else
	{	//VBO active state must agree with whether this buffer uses VBOs
		if (mGLBuffer != sGLRenderBuffer || useVBOs() != sVBOActive)
		{
			llerrs << "Wrong vertex buffer bound." << llendl;
		}
	}

	if (mode >= LLRender::NUM_MODES)
	{
		llerrs << "Invalid draw mode: " << mode << llendl;
		return;
	}

	stop_glerror();
	glDrawArrays(sGLMode[mode], first, count);
	stop_glerror();
	placeFence();
}
  571. //static
  572. void LLVertexBuffer::initClass(bool use_vbo, bool no_vbo_mapping)
  573. {
  574. sEnableVBOs = use_vbo && gGLManager.mHasVertexBufferObject ;
  575. sDisableVBOMapping = sEnableVBOs && no_vbo_mapping ;
  576. if(!sPrivatePoolp)
  577. {
  578. sPrivatePoolp = LLPrivateMemoryPoolManager::getInstance()->newPool(LLPrivateMemoryPool::STATIC) ;
  579. }
  580. sStreamVBOPool.mType = GL_ARRAY_BUFFER_ARB;
  581. sStreamVBOPool.mUsage= GL_STREAM_DRAW_ARB;
  582. sStreamIBOPool.mType = GL_ELEMENT_ARRAY_BUFFER_ARB;
  583. sStreamIBOPool.mUsage= GL_STREAM_DRAW_ARB;
  584. sDynamicVBOPool.mType = GL_ARRAY_BUFFER_ARB;
  585. sDynamicVBOPool.mUsage= GL_DYNAMIC_DRAW_ARB;
  586. sDynamicIBOPool.mType = GL_ELEMENT_ARRAY_BUFFER_ARB;
  587. sDynamicIBOPool.mUsage= GL_DYNAMIC_DRAW_ARB;
  588. }
//static
// Unbind any VAO, vertex buffer, and index buffer; reset all binding
// trackers and disable all client arrays. Leaves GL in a clean state for
// immediate-mode style helpers.
void LLVertexBuffer::unbind()
{
	if (sGLRenderArray)
	{
#if GL_ARB_vertex_array_object
		glBindVertexArray(0);
#endif
		sGLRenderArray = 0;
		//unbinding a VAO implicitly unbinds its element buffer
		sGLRenderIndices = 0;
		sIBOActive = FALSE;
	}

	if (sVBOActive)
	{
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
		sVBOActive = FALSE;
	}
	if (sIBOActive)
	{
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
		sIBOActive = FALSE;
	}

	sGLRenderBuffer = 0;
	sGLRenderIndices = 0;

	setupClientArrays(0);
}
  615. //static
  616. void LLVertexBuffer::cleanupClass()
  617. {
  618. LLMemType mt2(LLMemType::MTYPE_VERTEX_CLEANUP_CLASS);
  619. unbind();
  620. sStreamIBOPool.cleanup();
  621. sDynamicIBOPool.cleanup();
  622. sStreamVBOPool.cleanup();
  623. sDynamicVBOPool.cleanup();
  624. if(sPrivatePoolp)
  625. {
  626. LLPrivateMemoryPoolManager::getInstance()->deletePool(sPrivatePoolp) ;
  627. sPrivatePoolp = NULL ;
  628. }
  629. }
//----------------------------------------------------------------------------

// Construct a buffer holding the attributes in 'typemask' with GL usage
// hint 'usage'. The usage hint is normalized in order: downgraded to 0
// (client memory) when VBOs are globally disabled or stream-draw is turned
// off, promoted to stream-draw under sPreferStreamDraw or a core GL profile,
// and anything else nonzero collapses to dynamic-draw.
LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) :
	LLRefCount(),

	mNumVerts(0),
	mNumIndices(0),
	mUsage(usage),
	mGLBuffer(0),
	mGLArray(0),
	mGLIndices(0),
	mMappedData(NULL),
	mMappedIndexData(NULL),
	mVertexLocked(FALSE),
	mIndexLocked(FALSE),
	mFinal(FALSE),
	mEmpty(TRUE),
	mFence(NULL)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_CONSTRUCTOR);
	mFence = NULL;

	//mUsage == 0 means "keep this buffer in client memory"
	if (!sEnableVBOs)
	{
		mUsage = 0 ;
	}

	if (mUsage == GL_STREAM_DRAW_ARB && !sUseStreamDraw)
	{
		mUsage = 0;
	}

	if (mUsage == GL_DYNAMIC_DRAW_ARB && sPreferStreamDraw)
	{
		mUsage = GL_STREAM_DRAW_ARB;
	}

	if (mUsage == 0 && LLRender::sGLCoreProfile)
	{ //MUST use VBOs for all rendering
		mUsage = GL_STREAM_DRAW_ARB;
	}

	if (mUsage && mUsage != GL_STREAM_DRAW_ARB)
	{ //only stream_draw and dynamic_draw are supported when using VBOs, dynamic draw is the default
		mUsage = GL_DYNAMIC_DRAW_ARB;
	}

	//zero out offsets
	for (U32 i = 0; i < TYPE_MAX; i++)
	{
		mOffsets[i] = 0;
	}

	mTypeMask = typemask;
	mSize = 0;
	mIndicesSize = 0;
	mAlignedOffset = 0;
	mAlignedIndexOffset = 0;

	sCount++;
}
  681. //static
  682. S32 LLVertexBuffer::calcOffsets(const U32& typemask, S32* offsets, S32 num_vertices)
  683. {
  684. S32 offset = 0;
  685. for (S32 i=0; i<TYPE_TEXTURE_INDEX; i++)
  686. {
  687. U32 mask = 1<<i;
  688. if (typemask & mask)
  689. {
  690. if (offsets && LLVertexBuffer::sTypeSize[i])
  691. {
  692. offsets[i] = offset;
  693. offset += LLVertexBuffer::sTypeSize[i]*num_vertices;
  694. offset = (offset + 0xF) & ~0xF;
  695. }
  696. }
  697. }
  698. offsets[TYPE_TEXTURE_INDEX] = offsets[TYPE_VERTEX] + 12;
  699. return offset+16;
  700. }
  701. //static
  702. S32 LLVertexBuffer::calcVertexSize(const U32& typemask)
  703. {
  704. S32 size = 0;
  705. for (S32 i = 0; i < TYPE_TEXTURE_INDEX; i++)
  706. {
  707. U32 mask = 1<<i;
  708. if (typemask & mask)
  709. {
  710. size += LLVertexBuffer::sTypeSize[i];
  711. }
  712. }
  713. return size;
  714. }
// Current size in bytes of the vertex data store (power-of-two padded when
// VBO-backed; see genBuffer()).
S32 LLVertexBuffer::getSize() const
{
	return mSize;
}
// protected, use unref()
//virtual
// Releases the vertex and index stores (returning VBOs to their pools),
// deletes any VAO and sync fence, and asserts nothing is still mapped.
LLVertexBuffer::~LLVertexBuffer()
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_DESTRUCTOR);
	destroyGLBuffer();
	destroyGLIndices();

	if (mGLArray)
	{
#if GL_ARB_vertex_array_object
		glDeleteVertexArrays(1, &mGLArray);
#endif
	}

	sCount--;

	if (mFence)
	{
		delete mFence;
	}

	mFence = NULL;

	//destruction with an outstanding map would leak/corrupt client memory
	llassert_always(!mMappedData && !mMappedIndexData) ;
};
// Currently a no-op: the fence implementation below is deliberately
// commented out (GPU fencing after draws is disabled).
void LLVertexBuffer::placeFence() const
{
	/*if (!mFence && useVBOs())
	{
		if (gGLManager.mHasSync)
		{
			mFence = new LLGLSyncFence();
		}
	}

	if (mFence)
	{
		mFence->placeFence();
	}*/
}
// Currently a no-op: fence waiting is disabled along with placeFence().
void LLVertexBuffer::waitFence() const
{
	/*if (mFence)
	{
		mFence->wait();
	}*/
}
  761. //----------------------------------------------------------------------------
  762. void LLVertexBuffer::genBuffer(U32 size)
  763. {
  764. mSize = nhpo2(size);
  765. if (mUsage == GL_STREAM_DRAW_ARB)
  766. {
  767. mMappedData = sStreamVBOPool.allocate(mGLBuffer, mSize);
  768. }
  769. else
  770. {
  771. mMappedData = sDynamicVBOPool.allocate(mGLBuffer, mSize);
  772. }
  773. sGLCount++;
  774. }
  775. void LLVertexBuffer::genIndices(U32 size)
  776. {
  777. mIndicesSize = nhpo2(size);
  778. if (mUsage == GL_STREAM_DRAW_ARB)
  779. {
  780. mMappedIndexData = sStreamIBOPool.allocate(mGLIndices, mIndicesSize);
  781. }
  782. else
  783. {
  784. mMappedIndexData = sDynamicIBOPool.allocate(mGLIndices, mIndicesSize);
  785. }
  786. sGLCount++;
  787. }
  788. void LLVertexBuffer::releaseBuffer()
  789. {
  790. if (mUsage == GL_STREAM_DRAW_ARB)
  791. {
  792. sStreamVBOPool.release(mGLBuffer, mMappedData, mSize);
  793. }
  794. else
  795. {
  796. sDynamicVBOPool.release(mGLBuffer, mMappedData, mSize);
  797. }
  798. mGLBuffer = 0;
  799. mMappedData = NULL;
  800. sGLCount--;
  801. }
  802. void LLVertexBuffer::releaseIndices()
  803. {
  804. if (mUsage == GL_STREAM_DRAW_ARB)
  805. {
  806. sStreamIBOPool.release(mGLIndices, mMappedIndexData, mIndicesSize);
  807. }
  808. else
  809. {
  810. sDynamicIBOPool.release(mGLIndices, mMappedIndexData, mIndicesSize);
  811. }
  812. mGLIndices = 0;
  813. mMappedIndexData = NULL;
  814. sGLCount--;
  815. }
  816. void LLVertexBuffer::createGLBuffer(U32 size)
  817. {
  818. LLMemType mt2(LLMemType::MTYPE_VERTEX_CREATE_VERTICES);
  819. if (mGLBuffer)
  820. {
  821. destroyGLBuffer();
  822. }
  823. if (size == 0)
  824. {
  825. return;
  826. }
  827. mEmpty = TRUE;
  828. if (useVBOs())
  829. {
  830. genBuffer(size);
  831. }
  832. else
  833. {
  834. static int gl_buffer_idx = 0;
  835. mGLBuffer = ++gl_buffer_idx;
  836. mMappedData = (U8*)ALLOCATE_MEM(sPrivatePoolp, size);
  837. mSize = size;
  838. }
  839. }
  840. void LLVertexBuffer::createGLIndices(U32 size)
  841. {
  842. LLMemType mt2(LLMemType::MTYPE_VERTEX_CREATE_INDICES);
  843. if (mGLIndices)
  844. {
  845. destroyGLIndices();
  846. }
  847. if (size == 0)
  848. {
  849. return;
  850. }
  851. mEmpty = TRUE;
  852. //pad by 16 bytes for aligned copies
  853. size += 16;
  854. if (useVBOs())
  855. {
  856. //pad by another 16 bytes for VBO pointer adjustment
  857. size += 16;
  858. genIndices(size);
  859. }
  860. else
  861. {
  862. mMappedIndexData = (U8*)ALLOCATE_MEM(sPrivatePoolp, size);
  863. static int gl_buffer_idx = 0;
  864. mGLIndices = ++gl_buffer_idx;
  865. mIndicesSize = size;
  866. }
  867. }
  868. void LLVertexBuffer::destroyGLBuffer()
  869. {
  870. LLMemType mt2(LLMemType::MTYPE_VERTEX_DESTROY_BUFFER);
  871. if (mGLBuffer)
  872. {
  873. if (useVBOs())
  874. {
  875. releaseBuffer();
  876. }
  877. else
  878. {
  879. FREE_MEM(sPrivatePoolp, mMappedData) ;
  880. mMappedData = NULL;
  881. mEmpty = TRUE;
  882. }
  883. }
  884. mGLBuffer = 0;
  885. //unbind();
  886. }
  887. void LLVertexBuffer::destroyGLIndices()
  888. {
  889. LLMemType mt2(LLMemType::MTYPE_VERTEX_DESTROY_INDICES);
  890. if (mGLIndices)
  891. {
  892. if (useVBOs())
  893. {
  894. releaseIndices();
  895. }
  896. else
  897. {
  898. FREE_MEM(sPrivatePoolp, mMappedIndexData) ;
  899. mMappedIndexData = NULL;
  900. mEmpty = TRUE;
  901. }
  902. }
  903. mGLIndices = 0;
  904. //unbind();
  905. }
// Set the vertex count, reallocating the vertex store when it is too small
// or less than half used.
void LLVertexBuffer::updateNumVerts(S32 nverts)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_UPDATE_VERTS);

	llassert(nverts >= 0);

	//indices are 16-bit, so the vertex count is clamped
	//NOTE(review): allocateBuffer() allows up to 65536 verts while this
	//clamps at 65535 -- looks like an off-by-one inconsistency between the
	//two limits; confirm which is intended before changing either.
	if (nverts >= 65535)
	{
		llwarns << "Vertex buffer overflow!" << llendl;
		nverts = 65535;
	}

	U32 needed_size = calcOffsets(mTypeMask, mOffsets, nverts);

	//grow when too small, shrink when less than half utilized
	if (needed_size > mSize || needed_size <= mSize/2)
	{
		createGLBuffer(needed_size);
	}

	mNumVerts = nverts;
}
  922. void LLVertexBuffer::updateNumIndices(S32 nindices)
  923. {
  924. LLMemType mt2(LLMemType::MTYPE_VERTEX_UPDATE_INDICES);
  925. llassert(nindices >= 0);
  926. U32 needed_size = sizeof(U16) * nindices;
  927. if (needed_size > mIndicesSize || needed_size <= mIndicesSize/2)
  928. {
  929. createGLIndices(needed_size);
  930. }
  931. mNumIndices = nindices;
  932. }
// Size the vertex and index stores for the requested counts. When 'create'
// is set and either count is nonzero, also builds a VAO (where supported
// and enabled) and wires up its attribute pointers.
void LLVertexBuffer::allocateBuffer(S32 nverts, S32 nindices, bool create)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_ALLOCATE_BUFFER);

	stop_glerror();

	//65536 verts is the most a 16-bit index can address
	if (nverts < 0 || nindices < 0 ||
	    nverts > 65536)
	{
		llerrs << "Bad vertex buffer allocation: " << nverts << " : " << nindices << llendl;
	}

	updateNumVerts(nverts);
	updateNumIndices(nindices);

	if (create && (nverts || nindices))
	{
		//actually allocate space for the vertex buffer if using VBO mapping
		flush();

		if (gGLManager.mHasVertexArrayObject && useVBOs() && (LLRender::sGLCoreProfile || sUseVAO))
		{
#if GL_ARB_vertex_array_object
			glGenVertexArrays(1, &mGLArray);
#endif
			setupVertexArray();
		}
	}
}
static LLFastTimer::DeclareTimer FTM_SETUP_VERTEX_ARRAY("Setup VAO");

// Record this buffer's attribute layout into its VAO: binds the VAO plus
// both buffers, then enables and points each attribute present in
// mTypeMask (disabling the rest). The three tables below are indexed by
// TYPE_* and must stay in sync with the enum.
void LLVertexBuffer::setupVertexArray()
{
	if (!mGLArray)
	{
		return;
	}

	LLFastTimer t(FTM_SETUP_VERTEX_ARRAY);

#if GL_ARB_vertex_array_object
	glBindVertexArray(mGLArray);
#endif
	sGLRenderArray = mGLArray;

	//component count per attribute
	U32 attrib_size[] =
	{
		3, //TYPE_VERTEX,
		3, //TYPE_NORMAL,
		2, //TYPE_TEXCOORD0,
		2, //TYPE_TEXCOORD1,
		2, //TYPE_TEXCOORD2,
		2, //TYPE_TEXCOORD3,
		4, //TYPE_COLOR,
		4, //TYPE_EMISSIVE,
		3, //TYPE_BINORMAL,
		1, //TYPE_WEIGHT,
		4, //TYPE_WEIGHT4,
		4, //TYPE_CLOTHWEIGHT,
		1, //TYPE_TEXTURE_INDEX
	};

	//GL component type per attribute
	U32 attrib_type[] =
	{
		GL_FLOAT, //TYPE_VERTEX,
		GL_FLOAT, //TYPE_NORMAL,
		GL_FLOAT, //TYPE_TEXCOORD0,
		GL_FLOAT, //TYPE_TEXCOORD1,
		GL_FLOAT, //TYPE_TEXCOORD2,
		GL_FLOAT, //TYPE_TEXCOORD3,
		GL_UNSIGNED_BYTE, //TYPE_COLOR,
		GL_UNSIGNED_BYTE, //TYPE_EMISSIVE,
		GL_FLOAT, //TYPE_BINORMAL,
		GL_FLOAT, //TYPE_WEIGHT,
		GL_FLOAT, //TYPE_WEIGHT4,
		GL_FLOAT, //TYPE_CLOTHWEIGHT,
		GL_FLOAT, //TYPE_TEXTURE_INDEX
	};

	//normalization flag per attribute (colors are 0..255 -> 0..1)
	U32 attrib_normalized[] =
	{
		GL_FALSE, //TYPE_VERTEX,
		GL_FALSE, //TYPE_NORMAL,
		GL_FALSE, //TYPE_TEXCOORD0,
		GL_FALSE, //TYPE_TEXCOORD1,
		GL_FALSE, //TYPE_TEXCOORD2,
		GL_FALSE, //TYPE_TEXCOORD3,
		GL_TRUE, //TYPE_COLOR,
		GL_TRUE, //TYPE_EMISSIVE,
		GL_FALSE, //TYPE_BINORMAL,
		GL_FALSE, //TYPE_WEIGHT,
		GL_FALSE, //TYPE_WEIGHT4,
		GL_FALSE, //TYPE_CLOTHWEIGHT,
		GL_FALSE, //TYPE_TEXTURE_INDEX
	};

	bindGLBuffer(true);
	bindGLIndices(true);

	for (U32 i = 0; i < TYPE_MAX; ++i)
	{
		if (mTypeMask & (1 << i))
		{
			glEnableVertexAttribArrayARB(i);
			glVertexAttribPointerARB(i, attrib_size[i], attrib_type[i], attrib_normalized[i], sTypeSize[i], (void*) mOffsets[i]);
		}
		else
		{
			glDisableVertexAttribArrayARB(i);
		}
	}

	//draw a dummy triangle to set index array pointer
	//glDrawElements(GL_TRIANGLES, 0, GL_UNSIGNED_SHORT, NULL);

	unbind();
}
  1035. void LLVertexBuffer::resizeBuffer(S32 newnverts, S32 newnindices)
  1036. {
  1037. llassert(newnverts >= 0);
  1038. llassert(newnindices >= 0);
  1039. LLMemType mt2(LLMemType::MTYPE_VERTEX_RESIZE_BUFFER);
  1040. updateNumVerts(newnverts);
  1041. updateNumIndices(newnindices);
  1042. if (useVBOs())
  1043. {
  1044. flush();
  1045. if (mGLArray)
  1046. { //if size changed, offsets changed
  1047. setupVertexArray();
  1048. }
  1049. }
  1050. }
  1051. BOOL LLVertexBuffer::useVBOs() const
  1052. {
  1053. //it's generally ineffective to use VBO for things that are streaming on apple
  1054. if (!mUsage)
  1055. {
  1056. return FALSE;
  1057. }
  1058. return TRUE;
  1059. }
  1060. //----------------------------------------------------------------------------
  1061. bool expand_region(LLVertexBuffer::MappedRegion& region, S32 index, S32 count)
  1062. {
  1063. S32 end = index+count;
  1064. S32 region_end = region.mIndex+region.mCount;
  1065. if (end < region.mIndex ||
  1066. index > region_end)
  1067. { //gap exists, do not merge
  1068. return false;
  1069. }
  1070. S32 new_end = llmax(end, region_end);
  1071. S32 new_index = llmin(index, region.mIndex);
  1072. region.mIndex = new_index;
  1073. region.mCount = new_end-new_index;
  1074. return true;
  1075. }
  1076. // Map for data access
  1077. U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range)
  1078. {
  1079. bindGLBuffer(true);
  1080. LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
  1081. if (mFinal)
  1082. {
  1083. llerrs << "LLVertexBuffer::mapVeretxBuffer() called on a finalized buffer." << llendl;
  1084. }
  1085. if (!useVBOs() && !mMappedData && !mMappedIndexData)
  1086. {
  1087. llerrs << "LLVertexBuffer::mapVertexBuffer() called on unallocated buffer." << llendl;
  1088. }
  1089. if (useVBOs())
  1090. {
  1091. if (sDisableVBOMapping || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
  1092. {
  1093. if (count == -1)
  1094. {
  1095. count = mNumVerts-index;
  1096. }
  1097. bool mapped = false;
  1098. //see if range is already mapped
  1099. for (U32 i = 0; i < mMappedVertexRegions.size(); ++i)
  1100. {
  1101. MappedRegion& region = mMappedVertexRegions[i];
  1102. if (region.mType == type)
  1103. {
  1104. if (expand_region(region, index, count))
  1105. {
  1106. mapped = true;
  1107. break;
  1108. }
  1109. }
  1110. }
  1111. if (!mapped)
  1112. {
  1113. //not already mapped, map new region
  1114. MappedRegion region(type, !sDisableVBOMapping && map_range ? -1 : index, count);
  1115. mMappedVertexRegions.push_back(region);
  1116. }
  1117. }
  1118. if (mVertexLocked && map_range)
  1119. {
  1120. llerrs << "Attempted to map a specific range of a buffer that was already mapped." << llendl;
  1121. }
  1122. if (!mVertexLocked)
  1123. {
  1124. LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_VERTICES);
  1125. mVertexLocked = TRUE;
  1126. sMappedCount++;
  1127. stop_glerror();
  1128. if(sDisableVBOMapping)
  1129. {
  1130. map_range = false;
  1131. }
  1132. else
  1133. {
  1134. U8* src = NULL;
  1135. waitFence();
  1136. if (gGLManager.mHasMapBufferRange)
  1137. {
  1138. if (map_range)
  1139. {
  1140. #ifdef GL_ARB_map_buffer_range
  1141. S32 offset = mOffsets[type] + sTypeSize[type]*index;
  1142. S32 length = (sTypeSize[type]*count+0xF) & ~0xF;
  1143. src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, offset, length,
  1144. GL_MAP_WRITE_BIT |
  1145. GL_MAP_FLUSH_EXPLICIT_BIT |
  1146. GL_MAP_INVALIDATE_RANGE_BIT);
  1147. #endif
  1148. }
  1149. else
  1150. {
  1151. #ifdef GL_ARB_map_buffer_range
  1152. if (gDebugGL)
  1153. {
  1154. GLint size = 0;
  1155. glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size);
  1156. if (size < mSize)
  1157. {
  1158. llerrs << "Invalid buffer size." << llendl;
  1159. }
  1160. }
  1161. src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, 0, mSize,
  1162. GL_MAP_WRITE_BIT |
  1163. GL_MAP_FLUSH_EXPLICIT_BIT);
  1164. #endif
  1165. }
  1166. }
  1167. else if (gGLManager.mHasFlushBufferRange)
  1168. {
  1169. if (map_range)
  1170. {
  1171. glBufferParameteriAPPLE(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE);
  1172. glBufferParameteriAPPLE(GL_ARRAY_BUFFER_ARB, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE);
  1173. src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
  1174. }
  1175. else
  1176. {
  1177. src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
  1178. }
  1179. }
  1180. else
  1181. {
  1182. map_range = false;
  1183. src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
  1184. }
  1185. llassert(src != NULL);
  1186. mMappedData = LL_NEXT_ALIGNED_ADDRESS<U8>(src);
  1187. mAlignedOffset = mMappedData - src;
  1188. stop_glerror();
  1189. }
  1190. if (!mMappedData)
  1191. {
  1192. log_glerror();
  1193. //check the availability of memory
  1194. LLMemory::logMemoryInfo(TRUE) ;
  1195. if(!sDisableVBOMapping)
  1196. {
  1197. //--------------------
  1198. //print out more debug info before crash
  1199. llinfos << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << llendl ;
  1200. GLint size ;
  1201. glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size) ;
  1202. llinfos << "GL_ARRAY_BUFFER_ARB size is " << size << llendl ;
  1203. //--------------------
  1204. GLint buff;
  1205. glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
  1206. if ((GLuint)buff != mGLBuffer)
  1207. {
  1208. llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
  1209. }
  1210. llerrs << "glMapBuffer returned NULL (no vertex data)" << llendl;
  1211. }
  1212. else
  1213. {
  1214. llerrs << "memory allocation for vertex data failed." << llendl ;
  1215. }
  1216. }
  1217. }
  1218. }
  1219. else
  1220. {
  1221. map_range = false;
  1222. }
  1223. if (map_range && gGLManager.mHasMapBufferRange && !sDisableVBOMapping)
  1224. {
  1225. return mMappedData;
  1226. }
  1227. else
  1228. {
  1229. return mMappedData+mOffsets[type]+sTypeSize[type]*index;
  1230. }
  1231. }
// Map (a range of) the index data for CPU writes and return a pointer
// positioned at index element 'index'. count == -1 maps from 'index'
// through the end of the index buffer. When map_range is true and
// glMapBufferRange is available, only the requested range is mapped;
// otherwise the whole IBO is mapped (or, with sDisableVBOMapping, the
// client-memory copy is written and uploaded in unmapBuffer()).
U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
	bindGLIndices(true);
	if (mFinal)
	{
		llerrs << "LLVertexBuffer::mapIndexBuffer() called on a finalized buffer." << llendl;
	}
	if (!useVBOs() && !mMappedData && !mMappedIndexData)
	{
		llerrs << "LLVertexBuffer::mapIndexBuffer() called on unallocated buffer." << llendl;
	}
	if (useVBOs())
	{
		// Record which region is dirty so unmapBuffer() can flush just it.
		if (sDisableVBOMapping || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
		{
			if (count == -1)
			{
				count = mNumIndices-index;
			}
			bool mapped = false;
			//see if range is already mapped
			for (U32 i = 0; i < mMappedIndexRegions.size(); ++i)
			{
				MappedRegion& region = mMappedIndexRegions[i];
				if (expand_region(region, index, count))
				{
					mapped = true;
					break;
				}
			}
			if (!mapped)
			{
				//not already mapped, map new region
				// mIndex of -1 marks a range-mapped region (flushed at offset 0)
				MappedRegion region(TYPE_INDEX, !sDisableVBOMapping && map_range ? -1 : index, count);
				mMappedIndexRegions.push_back(region);
			}
		}
		if (mIndexLocked && map_range)
		{
			llerrs << "Attempted to map a specific range of a buffer that was already mapped." << llendl;
		}
		if (!mIndexLocked)
		{
			LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_INDICES);
			mIndexLocked = TRUE;
			sMappedCount++;
			stop_glerror();
			// Sanity check: the IBO we are about to map must actually be bound.
			if (gDebugGL && useVBOs())
			{
				GLint elem = 0;
				glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &elem);
				if (elem != mGLIndices)
				{
					llerrs << "Wrong index buffer bound!" << llendl;
				}
			}
			if(sDisableVBOMapping)
			{
				// Client-memory path: mMappedIndexData already points at our copy.
				map_range = false;
			}
			else
			{
				U8* src = NULL;
				waitFence();
				// Pick the best available mapping mechanism.
				if (gGLManager.mHasMapBufferRange)
				{
					if (map_range)
					{
#ifdef GL_ARB_map_buffer_range
						S32 offset = sizeof(U16)*index;
						S32 length = sizeof(U16)*count;
						src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length,
							GL_MAP_WRITE_BIT |
							GL_MAP_FLUSH_EXPLICIT_BIT |
							GL_MAP_INVALIDATE_RANGE_BIT);
#endif
					}
					else
					{
#ifdef GL_ARB_map_buffer_range
						src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, sizeof(U16)*mNumIndices,
							GL_MAP_WRITE_BIT |
							GL_MAP_FLUSH_EXPLICIT_BIT);
#endif
					}
				}
				else if (gGLManager.mHasFlushBufferRange)
				{
					// Apple path: disable serialization and flush-on-unmap,
					// then map the whole buffer; ranges are flushed explicitly.
					if (map_range)
					{
						glBufferParameteriAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE);
						glBufferParameteriAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE);
						src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
					}
					else
					{
						src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
					}
				}
				else
				{
					// Plain glMapBuffer fallback cannot map sub-ranges.
					map_range = false;
					src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
				}
				llassert(src != NULL);
				// Index data is used unaligned, so the offset is always 0 here.
				mMappedIndexData = src; //LL_NEXT_ALIGNED_ADDRESS<U8>(src);
				mAlignedIndexOffset = mMappedIndexData - src;
				stop_glerror();
			}
		}
		// NOTE(review): unlike mapVertexBuffer(), this failure check sits
		// outside the !mIndexLocked block -- preserved as-is.
		if (!mMappedIndexData)
		{
			// Mapping failed: log diagnostics before the fatal error.
			log_glerror();
			LLMemory::logMemoryInfo(TRUE) ;
			if(!sDisableVBOMapping)
			{
				GLint buff;
				glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
				if ((GLuint)buff != mGLIndices)
				{
					llerrs << "Invalid GL index buffer bound: " << buff << llendl;
				}
				llerrs << "glMapBuffer returned NULL (no index data)" << llendl;
			}
			else
			{
				llerrs << "memory allocation for Index data failed. " << llendl ;
			}
		}
	}
	else
	{
		// Client arrays: mMappedIndexData is always valid, no bookkeeping.
		map_range = false;
	}
	if (map_range && gGLManager.mHasMapBufferRange && !sDisableVBOMapping)
	{
		// Range-mapped pointer already starts at the requested element.
		return mMappedIndexData;
	}
	else
	{
		return mMappedIndexData + sizeof(U16)*index;
	}
}
  1376. void LLVertexBuffer::unmapBuffer()
  1377. {
  1378. LLMemType mt2(LLMemType::MTYPE_VERTEX_UNMAP_BUFFER);
  1379. if (!useVBOs())
  1380. {
  1381. return ; //nothing to unmap
  1382. }
  1383. bool updated_all = false ;
  1384. if (mMappedData && mVertexLocked)
  1385. {
  1386. bindGLBuffer(true);
  1387. updated_all = mIndexLocked; //both vertex and index buffers done updating
  1388. if(sDisableVBOMapping)
  1389. {
  1390. if (!mMappedVertexRegions.empty())
  1391. {
  1392. stop_glerror();
  1393. for (U32 i = 0; i < mMappedVertexRegions.size(); ++i)
  1394. {
  1395. const MappedRegion& region = mMappedVertexRegions[i];
  1396. S32 offset = region.mIndex >= 0 ? mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0;
  1397. S32 length = sTypeSize[region.mType]*region.mCount;
  1398. glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, offset, length, mMappedData+offset);
  1399. stop_glerror();
  1400. }
  1401. mMappedVertexRegions.clear();
  1402. }
  1403. else
  1404. {
  1405. stop_glerror();
  1406. glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData);
  1407. stop_glerror();
  1408. }
  1409. }
  1410. else
  1411. {
  1412. if (gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
  1413. {
  1414. if (!mMappedVertexRegions.empty())
  1415. {
  1416. stop_glerror();
  1417. for (U32 i = 0; i < mMappedVertexRegions.size(); ++i)
  1418. {
  1419. const MappedRegion& region = mMappedVertexRegions[i];
  1420. S32 offset = region.mIndex >= 0 ? mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0;
  1421. S32 length = sTypeSize[region.mType]*region.mCount;
  1422. if (gGLManager.mHasMapBufferRange)
  1423. {
  1424. #ifdef GL_ARB_map_buffer_range
  1425. glFlushMappedBufferRange(GL_ARRAY_BUFFER_ARB, offset, length);
  1426. #endif
  1427. }
  1428. else if (gGLManager.mHasFlushBufferRange)
  1429. {
  1430. glFlushMappedBufferRangeAPPLE(GL_ARRAY_BUFFER_ARB, offset, length);
  1431. }
  1432. stop_glerror();
  1433. }
  1434. mMappedVertexRegions.clear();
  1435. }
  1436. }
  1437. stop_glerror();
  1438. glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
  1439. stop_glerror();
  1440. mMappedData = NULL;
  1441. }
  1442. mVertexLocked = FALSE ;
  1443. sMappedCount--;
  1444. }
  1445. if (mMappedIndexData && mIndexLocked)
  1446. {
  1447. bindGLIndices();
  1448. if(sDisableVBOMapping)
  1449. {
  1450. if (!mMappedIndexRegions.empty())
  1451. {
  1452. for (U32 i = 0; i < mMappedIndexRegions.size(); ++i)
  1453. {
  1454. const MappedRegion& region = mMappedIndexRegions[i];
  1455. S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0;
  1456. S32 length = sizeof(U16)*region.mCount;
  1457. glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length, mMappedIndexData+offset);
  1458. stop_glerror();
  1459. }
  1460. mMappedIndexRegions.clear();
  1461. }
  1462. else
  1463. {
  1464. stop_glerror();
  1465. glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData);
  1466. stop_glerror();
  1467. }
  1468. }
  1469. else
  1470. {
  1471. if (gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
  1472. {
  1473. if (!mMappedIndexRegions.empty())
  1474. {
  1475. for (U32 i = 0; i < mMappedIndexRegions.size(); ++i)
  1476. {
  1477. const MappedRegion& region = mMappedIndexRegions[i];
  1478. S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0;
  1479. S32 length = sizeof(U16)*region.mCount;
  1480. if (gGLManager.mHasMapBufferRange)
  1481. {
  1482. #ifdef GL_ARB_map_buffer_range
  1483. glFlushMappedBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
  1484. #endif
  1485. }
  1486. else if (gGLManager.mHasFlushBufferRange)
  1487. {
  1488. #ifdef GL_APPLE_flush_buffer_range
  1489. glFlushMappedBufferRangeAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
  1490. #endif
  1491. }
  1492. stop_glerror();
  1493. }
  1494. mMappedIndexRegions.clear();
  1495. }
  1496. }
  1497. stop_glerror();
  1498. glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
  1499. stop_glerror();
  1500. mMappedIndexData = NULL ;
  1501. }
  1502. mIndexLocked = FALSE ;
  1503. sMappedCount--;
  1504. }
  1505. if(updated_all)
  1506. {
  1507. mEmpty = FALSE;
  1508. }
  1509. }
  1510. //----------------------------------------------------------------------------
  1511. template <class T,S32 type> struct VertexBufferStrider
  1512. {
  1513. typedef LLStrider<T> strider_t;
  1514. static bool get(LLVertexBuffer& vbo,
  1515. strider_t& strider,
  1516. S32 index, S32 count, bool map_range)
  1517. {
  1518. if (type == LLVertexBuffer::TYPE_INDEX)
  1519. {
  1520. U8* ptr = vbo.mapIndexBuffer(index, count, map_range);
  1521. if (ptr == NULL)
  1522. {
  1523. llwarns << "mapIndexBuffer failed!" << llendl;
  1524. return FALSE;
  1525. }
  1526. strider = (T*)ptr;
  1527. strider.setStride(0);
  1528. return TRUE;
  1529. }
  1530. else if (vbo.hasDataType(type))
  1531. {
  1532. S32 stride = LLVertexBuffer::sTypeSize[type];
  1533. U8* ptr = vbo.mapVertexBuffer(type, index, count, map_range);
  1534. if (ptr == NULL)
  1535. {
  1536. llwarns << "mapVertexBuffer failed!" << llendl;
  1537. return FALSE;
  1538. }
  1539. strider = (T*)ptr;
  1540. strider.setStride(stride);
  1541. return TRUE;
  1542. }
  1543. else
  1544. {
  1545. llerrs << "VertexBufferStrider could not find valid vertex data." << llendl;
  1546. }
  1547. return FALSE;
  1548. }
  1549. };
// Typed strider accessors. Each maps the corresponding attribute (or index)
// stream starting at 'index' for 'count' elements (-1 = to end) and wires
// the mapped pointer into 'strider'. Returns false when mapping fails.
bool LLVertexBuffer::getVertexStrider(LLStrider<LLVector3>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<LLVector3,TYPE_VERTEX>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getVertexStrider(LLStrider<LLVector4a>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<LLVector4a,TYPE_VERTEX>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getIndexStrider(LLStrider<U16>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<U16,TYPE_INDEX>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getTexCoord0Strider(LLStrider<LLVector2>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<LLVector2,TYPE_TEXCOORD0>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getTexCoord1Strider(LLStrider<LLVector2>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<LLVector2,TYPE_TEXCOORD1>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getNormalStrider(LLStrider<LLVector3>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<LLVector3,TYPE_NORMAL>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getBinormalStrider(LLStrider<LLVector3>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<LLVector3,TYPE_BINORMAL>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getColorStrider(LLStrider<LLColor4U>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<LLColor4U,TYPE_COLOR>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getEmissiveStrider(LLStrider<LLColor4U>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<LLColor4U,TYPE_EMISSIVE>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getWeightStrider(LLStrider<F32>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<F32,TYPE_WEIGHT>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getWeight4Strider(LLStrider<LLVector4>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<LLVector4,TYPE_WEIGHT4>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index, S32 count, bool map_range)
{
	return VertexBufferStrider<LLVector4,TYPE_CLOTHWEIGHT>::get(*this, strider, index, count, map_range);
}
  1598. //----------------------------------------------------------------------------
  1599. static LLFastTimer::DeclareTimer FTM_BIND_GL_ARRAY("Bind Array");
  1600. bool LLVertexBuffer::bindGLArray()
  1601. {
  1602. if (mGLArray && sGLRenderArray != mGLArray)
  1603. {
  1604. {
  1605. LLFastTimer t(FTM_BIND_GL_ARRAY);
  1606. #if GL_ARB_vertex_array_object
  1607. glBindVertexArray(mGLArray);
  1608. #endif
  1609. sGLRenderArray = mGLArray;
  1610. }
  1611. //really shouldn't be necessary, but some drivers don't properly restore the
  1612. //state of GL_ELEMENT_ARRAY_BUFFER_BINDING
  1613. bindGLIndices();
  1614. return true;
  1615. }
  1616. return false;
  1617. }
  1618. static LLFastTimer::DeclareTimer FTM_BIND_GL_BUFFER("Bind Buffer");
  1619. bool LLVertexBuffer::bindGLBuffer(bool force_bind)
  1620. {
  1621. bindGLArray();
  1622. bool ret = false;
  1623. if (useVBOs() && (force_bind || (mGLBuffer && (mGLBuffer != sGLRenderBuffer || !sVBOActive))))
  1624. {
  1625. LLFastTimer t(FTM_BIND_GL_BUFFER);
  1626. /*if (sMapped)
  1627. {
  1628. llerrs << "VBO bound while another VBO mapped!" << llendl;
  1629. }*/
  1630. glBindBufferARB(GL_ARRAY_BUFFER_ARB, mGLBuffer);
  1631. sGLRenderBuffer = mGLBuffer;
  1632. sBindCount++;
  1633. sVBOActive = TRUE;
  1634. if (mGLArray)
  1635. {
  1636. llassert(sGLRenderArray == mGLArray);
  1637. //mCachedRenderBuffer = mGLBuffer;
  1638. }
  1639. ret = true;
  1640. }
  1641. return ret;
  1642. }
  1643. static LLFastTimer::DeclareTimer FTM_BIND_GL_INDICES("Bind Indices");
  1644. bool LLVertexBuffer::bindGLIndices(bool force_bind)
  1645. {
  1646. bindGLArray();
  1647. bool ret = false;
  1648. if (useVBOs() && (force_bind || (mGLIndices && (mGLIndices != sGLRenderIndices || !sIBOActive))))
  1649. {
  1650. LLFastTimer t(FTM_BIND_GL_INDICES);
  1651. /*if (sMapped)
  1652. {
  1653. llerrs << "VBO bound while another VBO mapped!" << llendl;
  1654. }*/
  1655. glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, mGLIndices);
  1656. sGLRenderIndices = mGLIndices;
  1657. stop_glerror();
  1658. sBindCount++;
  1659. sIBOActive = TRUE;
  1660. ret = true;
  1661. }
  1662. return ret;
  1663. }
  1664. void LLVertexBuffer::flush()
  1665. {
  1666. if (useVBOs())
  1667. {
  1668. unmapBuffer();
  1669. }
  1670. }
// Set for rendering
// Makes this buffer current for draw calls: flushes pending writes, binds
// the VAO/VBO/IBO (or unbinds them for client-array rendering), and redoes
// attribute pointer setup when the data mask or binding state changed.
void LLVertexBuffer::setBuffer(U32 data_mask)
{
	flush();
	LLMemType mt2(LLMemType::MTYPE_VERTEX_SET_BUFFER);
	//set up pointers if the data mask is different ...
	BOOL setup = (sLastMask != data_mask);
	if (gDebugGL && data_mask != 0)
	{ //make sure data requirements are fulfilled
		// Debug-only: verify every attribute the bound shader consumes is
		// present in data_mask.
		LLGLSLShader* shader = LLGLSLShader::sCurBoundShaderPtr;
		if (shader)
		{
			U32 required_mask = 0;
			for (U32 i = 0; i < LLVertexBuffer::TYPE_TEXTURE_INDEX; ++i)
			{
				if (shader->getAttribLocation(i) > -1)
				{
					U32 required = 1 << i;
					if ((data_mask & required) == 0)
					{
						llwarns << "Missing attribute: " << LLShaderMgr::instance()->mReservedAttribs[i] << llendl;
					}
					required_mask |= required;
				}
			}
			if ((data_mask & required_mask) != required_mask)
			{
				llerrs << "Shader consumption mismatches data provision." << llendl;
			}
		}
	}
	if (useVBOs())
	{
		if (mGLArray)
		{
			bindGLArray();
			setup = FALSE; //do NOT perform pointer setup if using VAO
		}
		else
		{
			// A fresh buffer or index bind invalidates the current pointers.
			if (bindGLBuffer())
			{
				setup = TRUE;
			}
			if (bindGLIndices())
			{
				setup = TRUE;
			}
		}
		// Debug-only: confirm GL's actual bindings match our shadow state.
		// NOTE(review): 'error' is set but never read after this block.
		BOOL error = FALSE;
		if (gDebugGL && !mGLArray)
		{
			GLint buff;
			glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
			if ((GLuint)buff != mGLBuffer)
			{
				if (gDebugSession)
				{
					error = TRUE;
					gFailLog << "Invalid GL vertex buffer bound: " << buff << std::endl;
				}
				else
				{
					llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
				}
			}
			if (mGLIndices)
			{
				glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
				if ((GLuint)buff != mGLIndices)
				{
					if (gDebugSession)
					{
						error = TRUE;
						gFailLog << "Invalid GL index buffer bound: " << buff << std::endl;
					}
					else
					{
						llerrs << "Invalid GL index buffer bound: " << buff << llendl;
					}
				}
			}
		}
	}
	else
	{
		// Client-array path: make sure no VAO/VBO/IBO stays bound.
		if (sGLRenderArray)
		{
#if GL_ARB_vertex_array_object
			glBindVertexArray(0);
#endif
			sGLRenderArray = 0;
			sGLRenderIndices = 0;
			sIBOActive = FALSE;
		}
		if (mGLBuffer)
		{
			if (sVBOActive)
			{
				glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
				sBindCount++;
				sVBOActive = FALSE;
				setup = TRUE; // ... or a VBO is deactivated
			}
			if (sGLRenderBuffer != mGLBuffer)
			{
				sGLRenderBuffer = mGLBuffer;
				setup = TRUE; // ... or a client memory pointer changed
			}
		}
		if (mGLIndices)
		{
			if (sIBOActive)
			{
				glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
				sBindCount++;
				sIBOActive = FALSE;
			}
			sGLRenderIndices = mGLIndices;
		}
	}
	if (!mGLArray)
	{
		// Enable/disable the client-state arrays to match data_mask.
		setupClientArrays(data_mask);
	}
	if (mGLBuffer)
	{
		if (data_mask && setup)
		{
			setupVertexBuffer(data_mask); // subclass specific setup (virtual function)
			sSetCount++;
		}
	}
}
  1805. // virtual (default)
  1806. void LLVertexBuffer::setupVertexBuffer(U32 data_mask)
  1807. {
  1808. LLMemType mt2(LLMemType::MTYPE_VERTEX_SETUP_VERTEX_BUFFER);
  1809. stop_glerror();
  1810. U8* base = useVBOs() ? (U8*) mAlignedOffset : mMappedData;
  1811. /*if ((data_mask & mTypeMask) != data_mask)
  1812. {
  1813. llerrs << "LLVertexBuffer::setupVertexBuffer missing required components for supplied data mask." << llendl;
  1814. }*/
  1815. if (LLGLSLShader::sNoFixedFunction)
  1816. {
  1817. if (data_mask & MAP_NORMAL)
  1818. {
  1819. S32 loc = TYPE_NORMAL;
  1820. void* ptr = (void*)(base + mOffsets[TYPE_NORMAL]);
  1821. glVertexAttribPointerARB(loc, 3, GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_NORMAL], ptr);
  1822. }
  1823. if (data_mask & MAP_TEXCOORD3)
  1824. {
  1825. S32 loc = TYPE_TEXCOORD3;
  1826. void* ptr = (void*)(base + mOffsets[TYPE_TEXCOORD3]);
  1827. glVertexAttribPointerARB(loc,2,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD3], ptr);
  1828. }
  1829. if (data_mask & MAP_TEXCOORD2)
  1830. {
  1831. S32 loc = TYPE_TEXCOORD2;
  1832. void* ptr = (void*)(base + mOffsets[TYPE_TEXCOORD2]);
  1833. glVertexAttribPointerARB(loc,2,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD2], ptr);
  1834. }
  1835. if (data_mask & MAP_TEXCOORD1)
  1836. {
  1837. S32 loc = TYPE_TEXCOORD1;
  1838. void* ptr = (void*)(base + mOffsets[TYPE_TEXCOORD1]);
  1839. glVertexAttribPointerARB(loc,2,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD1], ptr);
  1840. }
  1841. if (data_mask & MAP_BINORMAL)
  1842. {
  1843. S32 loc = TYPE_BINORMAL;
  1844. void* ptr = (void*)(base + mOffsets[TYPE_BINORMAL]);
  1845. glVertexAttribPointerARB(loc, 3,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_BINORMAL], ptr);
  1846. }
  1847. if (data_mask & MAP_TEXCOORD0)
  1848. {
  1849. S32 loc = TYPE_TEXCOORD0;
  1850. void* ptr = (void*)(base + mOffsets[TYPE_TEXCOORD0]);
  1851. glVertexAttribPointerARB(loc,2,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD0], ptr);
  1852. }
  1853. if (data_mask & MAP_COLOR)
  1854. {
  1855. S32 loc = TYPE_COLOR;
  1856. void* ptr = (void*)(base + mOffsets[TYPE_COLOR]);
  1857. glVertexAttribPointerARB(loc, 4, GL_UNSIGNED_BYTE, GL_TRUE, LLVertexBuffer::sTypeSize[TYPE_COLOR], ptr);
  1858. }
  1859. if (data_mask & MAP_EMISSIVE)
  1860. {
  1861. S32 loc = TYPE_EMISSIVE;
  1862. void* ptr = (void*)(base + mOffsets[TYPE_EMISSIVE]);
  1863. glVertexAttribPointerARB(loc, 4, GL_UNSIGNED_BYTE, GL_TRUE, LLVertexBuffer::sTypeSize[TYPE_EMISSIVE], ptr);
  1864. }
  1865. if (data_mask & MAP_WEIGHT)
  1866. {
  1867. S32 loc = TYPE_WEIGHT;
  1868. void* ptr = (void*)(base + mOffsets[TYPE_WEIGHT]);
  1869. glVertexAttribPointerARB(loc, 1, GL_FLOAT, FALSE, LLVertexBuffer::sTypeSize[TYPE_WEIGHT], ptr);
  1870. }
  1871. if (data_mask & MAP_WEIGHT4)
  1872. {
  1873. S32 loc = TYPE_WEIGHT4;
  1874. void* ptr = (void*)(base+mOffsets[TYPE_WEIGHT4]);
  1875. glVertexAttribPointerARB(loc, 4, GL_FLOAT, FALSE, LLVertexBuffer::sTypeSize[TYPE_WEIGHT4], ptr);
  1876. }
  1877. if (data_mask & MAP_CLOTHWEIGHT)
  1878. {
  1879. S32 loc = TYPE_CLOTHWEIGHT;
  1880. void* ptr = (void*)(base + mOffsets[TYPE_CLOTHWEIGHT]);
  1881. glVertexAttribPointerARB(loc, 4, GL_FLOAT, TRUE, LLVertexBuffer::sTypeSize[TYPE_CLOTHWEIGHT], ptr);
  1882. }
  1883. if (data_mask & MAP_TEXTURE_INDEX)
  1884. {
  1885. S32 loc = TYPE_TEXTURE_INDEX;
  1886. void *ptr = (void*) (base + mOffsets[TYPE_VERTEX] + 12);
  1887. glVertexAttribPointerARB(loc, 1, GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_VERTEX], ptr);
  1888. }
  1889. if (data_mask & MAP_VERTEX)
  1890. {
  1891. S32 loc = TYPE_VERTEX;
  1892. void* ptr = (void*)(base + mOffsets[TYPE_VERTEX]);
  1893. glVertexAttribPointerARB(loc, 3,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_VERTEX], ptr);
  1894. }
  1895. }
  1896. else
  1897. {
  1898. if (data_mask & MAP_NORMAL)
  1899. {
  1900. glNormalPointer(GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_NORMAL], (void*)(base + mOffsets[TYPE_NORMAL]));
  1901. }
  1902. if (data_mask & MAP_TEXCOORD3)
  1903. {
  1904. glClientActiveTextureARB(GL_TEXTURE3_ARB);
  1905. glTexCoordPointer(2,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD3], (void*)(base + mOffsets[TYPE_TEXCOORD3]));
  1906. glClientActiveTextureARB(GL_TEXTURE0_ARB);
  1907. }
  1908. if (data_mask & MAP_TEXCOORD2)
  1909. {
  1910. glClientActiveTextureARB(GL_TEXTURE2_ARB);
  1911. glTexCoordPointer(2,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD2], (void*)(base + mOffsets[TYPE_TEXCOORD2]));
  1912. glClientActiveTextureARB(GL_TEXTURE0_ARB);
  1913. }
  1914. if (data_mask & MAP_TEXCOORD1)
  1915. {
  1916. glClientActiveTextureARB(GL_TEXTURE1_ARB);
  1917. glTexCoordPointer(2,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD1], (void*)(base + mOffsets[TYPE_TEXCOORD1]));
  1918. glClientActiveTextureARB(GL_TEXTURE0_ARB);
  1919. }
  1920. if (data_mask & MAP_BINORMAL)
  1921. {
  1922. glClientActiveTextureARB(GL_TEXTURE2_ARB);
  1923. glTexCoordPointer(3,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_BINORMAL], (void*)(base + mOffsets[TYPE_BINORMAL]));
  1924. glClientActiveTextureARB(GL_TEXTURE0_ARB);
  1925. }
  1926. if (data_mask & MAP_TEXCOORD0)
  1927. {
  1928. glTexCoordPointer(2,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD0], (void*)(base + mOffsets[TYPE_TEXCOORD0]));
  1929. }
  1930. if (data_mask & MAP_COLOR)
  1931. {
  1932. glColorPointer(4, GL_UNSIGNED_BYTE, LLVertexBuffer::sTypeSize[TYPE_COLOR], (void*)(base + mOffsets[TYPE_COLOR]));
  1933. }
  1934. if (data_mask & MAP_VERTEX)
  1935. {
  1936. glVertexPointer(3,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_VERTEX], (void*)(base + 0));
  1937. }
  1938. }
  1939. llglassertok();
  1940. }
// A contiguous span of mapped buffer data: 'type' is the attribute stream
// (or TYPE_INDEX for index data), 'index'/'count' the element range.
// The mapping code stores an index of -1 to mean "flush at offset 0"
// (see mapVertexBuffer/unmapBuffer).
LLVertexBuffer::MappedRegion::MappedRegion(S32 type, S32 index, S32 count)
: mType(type), mIndex(index), mCount(count)
{
	// TYPE_TEXTURE_INDEX has no stream of its own (it is packed into the
	// vertex stream -- see setupVertexBuffer), so it is never a region type.
	llassert(mType == LLVertexBuffer::TYPE_INDEX ||
		mType < LLVertexBuffer::TYPE_TEXTURE_INDEX);
}