
/indra/llmessage/llbuffer.cpp

https://bitbucket.org/lindenlab/viewer-beta/
Possible License(s): LGPL-2.1

/**
 * @file llbuffer.cpp
 * @author Phoenix
 * @date 2005-09-20
 * @brief Implementation of the segments, buffers, and buffer arrays.
 *
 * $LicenseInfo:firstyear=2005&license=viewerlgpl$
 * Second Life Viewer Source Code
 * Copyright (C) 2010, Linden Research, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License only.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
 * $/LicenseInfo$
 */

#include "linden_common.h"
#include "llbuffer.h"
#include "llmath.h"
#include "llmemtype.h"
#include "llstl.h"
#include "llthread.h"

#define ASSERT_LLBUFFERARRAY_MUTEX_LOCKED llassert(!mMutexp || mMutexp->isSelfLocked());

/**
 * LLSegment
 */
LLSegment::LLSegment() :
    mChannel(0),
    mData(NULL),
    mSize(0)
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
}

LLSegment::LLSegment(S32 channel, U8* data, S32 data_len) :
    mChannel(channel),
    mData(data),
    mSize(data_len)
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
}

LLSegment::~LLSegment()
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
}

bool LLSegment::isOnChannel(S32 channel) const
{
    return (mChannel == channel);
}

S32 LLSegment::getChannel() const
{
    return mChannel;
}

void LLSegment::setChannel(S32 channel)
{
    mChannel = channel;
}

U8* LLSegment::data() const
{
    return mData;
}

S32 LLSegment::size() const
{
    return mSize;
}

bool LLSegment::operator==(const LLSegment& rhs) const
{
    if((mData != rhs.mData)||(mSize != rhs.mSize)||(mChannel != rhs.mChannel))
    {
        return false;
    }
    return true;
}
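
// Note: an LLSegment does not own mData. It is a (channel, pointer, length)
// view into memory owned by the LLBuffer that created it, so copying or
// destroying a segment never frees the underlying bytes.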

/**
 * LLHeapBuffer
 */
LLHeapBuffer::LLHeapBuffer() :
    mBuffer(NULL),
    mSize(0),
    mNextFree(NULL),
    mReclaimedBytes(0)
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    const S32 DEFAULT_HEAP_BUFFER_SIZE = 16384;
    allocate(DEFAULT_HEAP_BUFFER_SIZE);
}

LLHeapBuffer::LLHeapBuffer(S32 size) :
    mBuffer(NULL),
    mSize(0),
    mNextFree(NULL),
    mReclaimedBytes(0)
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    allocate(size);
}

LLHeapBuffer::LLHeapBuffer(const U8* src, S32 len) :
    mBuffer(NULL),
    mSize(0),
    mNextFree(NULL),
    mReclaimedBytes(0)
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    if((len > 0) && src)
    {
        allocate(len);
        if(mBuffer)
        {
            memcpy(mBuffer, src, len); /*Flawfinder: ignore*/
        }
    }
}

// virtual
LLHeapBuffer::~LLHeapBuffer()
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    delete[] mBuffer;
    mBuffer = NULL;
    mSize = 0;
    mNextFree = NULL;
}

S32 LLHeapBuffer::bytesLeft() const
{
    return (mSize - (mNextFree - mBuffer));
}

// virtual
bool LLHeapBuffer::createSegment(
    S32 channel,
    S32 size,
    LLSegment& segment)
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);

    // get actual size of the segment.
    S32 actual_size = llmin(size, (mSize - S32(mNextFree - mBuffer)));

    // bail if we cannot build a valid segment
    if(actual_size <= 0)
    {
        return false;
    }

    // Yay, we're done.
    segment = LLSegment(channel, mNextFree, actual_size);
    mNextFree += actual_size;
    return true;
}
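
// Illustrative use of the allocate-then-carve pattern above (a minimal
// sketch; the 8 KB size, channel 0, and 512-byte request are arbitrary):
//
//   LLHeapBuffer buf(8192);
//   LLSegment seg;
//   if(buf.createSegment(0, 512, seg))
//   {
//       // seg.data() points into buf's storage; seg.size() is at most 512,
//       // and smaller if fewer bytes remain in the buffer.
//   }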

// virtual
bool LLHeapBuffer::reclaimSegment(const LLSegment& segment)
{
    if(containsSegment(segment))
    {
        mReclaimedBytes += segment.size();
        if(mReclaimedBytes == mSize)
        {
            // We have reclaimed all of the memory from this
            // buffer. Therefore, we can reset the mNextFree to the
            // start of the buffer, and reset the reclaimed bytes.
            mReclaimedBytes = 0;
            mNextFree = mBuffer;
        }
        else if(mReclaimedBytes > mSize)
        {
            llwarns << "LLHeapBuffer reclaimed more memory than allocated."
                    << " This is probably programmer error." << llendl;
        }
        return true;
    }
    return false;
}
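
// Note: reclaiming only counts bytes; no space becomes reusable until every
// byte in the buffer has been reclaimed, at which point the whole buffer
// resets to empty (mNextFree returns to mBuffer).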

// virtual
bool LLHeapBuffer::containsSegment(const LLSegment& segment) const
{
    // *NOTE: this check is fairly simple because heap buffers are
    // simple contiguous chunks of heap memory.
    if((mBuffer > segment.data())
       || ((mBuffer + mSize) < (segment.data() + segment.size())))
    {
        return false;
    }
    return true;
}

void LLHeapBuffer::allocate(S32 size)
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    mReclaimedBytes = 0;
    mBuffer = new U8[size];
    if(mBuffer)
    {
        mSize = size;
        mNextFree = mBuffer;
    }
}

/**
 * LLBufferArray
 */
LLBufferArray::LLBufferArray() :
    mNextBaseChannel(0),
    mMutexp(NULL)
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
}

LLBufferArray::~LLBufferArray()
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    std::for_each(mBuffers.begin(), mBuffers.end(), DeletePointer());

    delete mMutexp;
}

// static
LLChannelDescriptors LLBufferArray::makeChannelConsumer(
    const LLChannelDescriptors& channels)
{
    LLChannelDescriptors rv(channels.out());
    return rv;
}

void LLBufferArray::lock()
{
    if(mMutexp)
    {
        mMutexp->lock();
    }
}

void LLBufferArray::unlock()
{
    if(mMutexp)
    {
        mMutexp->unlock();
    }
}

LLMutex* LLBufferArray::getMutex()
{
    return mMutexp;
}

void LLBufferArray::setThreaded(bool threaded)
{
    if(threaded)
    {
        if(!mMutexp)
        {
            mMutexp = new LLMutex(NULL);
        }
    }
    else
    {
        if(mMutexp)
        {
            delete mMutexp;
            mMutexp = NULL;
        }
    }
}

LLChannelDescriptors LLBufferArray::nextChannel()
{
    LLChannelDescriptors rv(mNextBaseChannel++);
    return rv;
}

//mMutexp should be locked before calling this.
S32 LLBufferArray::capacity() const
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED

    S32 total = 0;
    const_buffer_iterator_t iter = mBuffers.begin();
    const_buffer_iterator_t end = mBuffers.end();
    for(; iter != end; ++iter)
    {
        total += (*iter)->capacity();
    }
    return total;
}

bool LLBufferArray::append(S32 channel, const U8* src, S32 len)
{
    LLMutexLock lock(mMutexp);
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    std::vector<LLSegment> segments;
    if(copyIntoBuffers(channel, src, len, segments))
    {
        mSegments.insert(mSegments.end(), segments.begin(), segments.end());
        return true;
    }
    return false;
}
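
// Illustrative write path (a minimal sketch; the single-threaded setup and
// payload are arbitrary):
//
//   LLBufferArray buffers;
//   buffers.setThreaded(false);               // no mutex required
//   LLChannelDescriptors ch = buffers.nextChannel();
//   const U8 payload[] = { 'h', 'i', 0 };
//   buffers.append(ch.out(), payload, sizeof(payload));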

//mMutexp should be locked before calling this.
bool LLBufferArray::prepend(S32 channel, const U8* src, S32 len)
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);

    std::vector<LLSegment> segments;
    if(copyIntoBuffers(channel, src, len, segments))
    {
        mSegments.insert(mSegments.begin(), segments.begin(), segments.end());
        return true;
    }
    return false;
}

bool LLBufferArray::insertAfter(
    segment_iterator_t segment,
    S32 channel,
    const U8* src,
    S32 len)
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    std::vector<LLSegment> segments;

    LLMutexLock lock(mMutexp);
    if(mSegments.end() != segment)
    {
        ++segment;
    }
    if(copyIntoBuffers(channel, src, len, segments))
    {
        mSegments.insert(segment, segments.begin(), segments.end());
        return true;
    }
    return false;
}

//mMutexp should be locked before calling this.
LLBufferArray::segment_iterator_t LLBufferArray::splitAfter(U8* address)
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    segment_iterator_t end = mSegments.end();
    segment_iterator_t it = getSegment(address);
    if(it == end)
    {
        return end;
    }

    // We have the location and the segment.
    U8* base = (*it).data();
    S32 size = (*it).size();
    if(address == (base + size - 1))
    {
        // No need to split, since this is the last byte of the
        // segment. We do not want to have zero length segments, since
        // that will only incur processing overhead with no advantage.
        return it;
    }
    S32 channel = (*it).getChannel();
    LLSegment segment1(channel, base, (address - base) + 1);
    *it = segment1;
    segment_iterator_t rv = it;
    ++it;
    LLSegment segment2(channel, address + 1, size - (address - base) - 1);
    mSegments.insert(it, segment2);
    return rv;
}

//mMutexp should be locked before calling this.
LLBufferArray::segment_iterator_t LLBufferArray::beginSegment()
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    return mSegments.begin();
}

//mMutexp should be locked before calling this.
LLBufferArray::segment_iterator_t LLBufferArray::endSegment()
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    return mSegments.end();
}

//mMutexp should be locked before calling this.
LLBufferArray::segment_iterator_t LLBufferArray::constructSegmentAfter(
    U8* address,
    LLSegment& segment)
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    segment_iterator_t rv = mSegments.begin();
    segment_iterator_t end = mSegments.end();
    if(!address)
    {
        if(rv != end)
        {
            segment = (*rv);
        }
    }
    else
    {
        // we have an address - find the segment it is in.
        for( ; rv != end; ++rv)
        {
            if((address >= (*rv).data())
               && (address < ((*rv).data() + (*rv).size())))
            {
                if((++address) < ((*rv).data() + (*rv).size()))
                {
                    // it's in this segment - construct an appropriate
                    // sub-segment.
                    segment = LLSegment(
                        (*rv).getChannel(),
                        address,
                        (*rv).size() - (address - (*rv).data()));
                }
                else
                {
                    ++rv;
                    if(rv != end)
                    {
                        segment = (*rv);
                    }
                }
                break;
            }
        }
    }
    if(rv == end)
    {
        segment = LLSegment();
    }
    return rv;
}

//mMutexp should be locked before calling this.
LLBufferArray::segment_iterator_t LLBufferArray::getSegment(U8* address)
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    segment_iterator_t end = mSegments.end();
    if(!address)
    {
        return end;
    }
    segment_iterator_t it = mSegments.begin();
    for( ; it != end; ++it)
    {
        if((address >= (*it).data())&&(address < (*it).data() + (*it).size()))
        {
            // found it.
            return it;
        }
    }
    return end;
}

//mMutexp should be locked before calling this.
LLBufferArray::const_segment_iterator_t LLBufferArray::getSegment(
    U8* address) const
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    const_segment_iterator_t end = mSegments.end();
    if(!address)
    {
        return end;
    }
    const_segment_iterator_t it = mSegments.begin();
    for( ; it != end; ++it)
    {
        if((address >= (*it).data())
           && (address < (*it).data() + (*it).size()))
        {
            // found it.
            return it;
        }
    }
    return end;
}

/*
U8* LLBufferArray::getAddressAfter(U8* address)
{
    U8* rv = NULL;
    segment_iterator_t it = getSegment(address);
    segment_iterator_t end = mSegments.end();
    if(it != end)
    {
        if(++address < ((*it).data() + (*it).size()))
        {
            // it's in the same segment
            rv = address;
        }
        else
        {
            // it's in the next segment
            if(++it != end)
            {
                rv = (*it).data();
            }
        }
    }
    return rv;
}
*/

S32 LLBufferArray::countAfter(S32 channel, U8* start) const
{
    S32 count = 0;
    S32 offset = 0;
    const_segment_iterator_t it;

    LLMutexLock lock(mMutexp);
    const_segment_iterator_t end = mSegments.end();
    if(start)
    {
        it = getSegment(start);
        if(it == end)
        {
            return count;
        }
        if(++start < ((*it).data() + (*it).size()))
        {
            // it's in the same segment
            offset = start - (*it).data();
        }
        else if(++it == end)
        {
            // it's in the next segment
            return count;
        }
    }
    else
    {
        it = mSegments.begin();
    }
    while(it != end)
    {
        if((*it).isOnChannel(channel))
        {
            count += (*it).size() - offset;
        }
        offset = 0;
        ++it;
    }
    return count;
}

U8* LLBufferArray::readAfter(
    S32 channel,
    U8* start,
    U8* dest,
    S32& len) const
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    U8* rv = start;
    if(!dest || len <= 0)
    {
        return rv;
    }
    S32 bytes_left = len;
    len = 0;
    S32 bytes_to_copy = 0;
    const_segment_iterator_t it;

    LLMutexLock lock(mMutexp);
    const_segment_iterator_t end = mSegments.end();
    if(start)
    {
        it = getSegment(start);
        if(it == end)
        {
            return rv;
        }
        if((++start < ((*it).data() + (*it).size()))
           && (*it).isOnChannel(channel))
        {
            // copy the data out of this segment
            S32 bytes_in_segment = (*it).size() - (start - (*it).data());
            bytes_to_copy = llmin(bytes_left, bytes_in_segment);
            memcpy(dest, start, bytes_to_copy); /*Flawfinder: ignore*/
            len += bytes_to_copy;
            bytes_left -= bytes_to_copy;
            rv = start + bytes_to_copy - 1;
            ++it;
        }
        else
        {
            ++it;
        }
    }
    else
    {
        it = mSegments.begin();
    }
    while(bytes_left && (it != end))
    {
        if(!((*it).isOnChannel(channel)))
        {
            ++it;
            continue;
        }
        bytes_to_copy = llmin(bytes_left, (*it).size());
        memcpy(dest + len, (*it).data(), bytes_to_copy); /*Flawfinder: ignore*/
        len += bytes_to_copy;
        bytes_left -= bytes_to_copy;
        rv = (*it).data() + bytes_to_copy - 1;
        ++it;
    }
    return rv;
}
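
// Illustrative read path (a minimal sketch; drains whatever is currently
// queued on one channel):
//
//   S32 bytes = buffers.countAfter(channel, NULL);
//   if(bytes > 0)
//   {
//       std::vector<U8> out(bytes);
//       S32 len = bytes;
//       buffers.readAfter(channel, NULL, &out[0], len); // len becomes bytes copied
//   }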

U8* LLBufferArray::seek(
    S32 channel,
    U8* start,
    S32 delta) const
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    const_segment_iterator_t it;
    const_segment_iterator_t end = mSegments.end();
    U8* rv = start;
    if(0 == delta)
    {
        if((U8*)npos == start)
        {
            // someone is looking for end of data.
            segment_list_t::const_reverse_iterator rit = mSegments.rbegin();
            segment_list_t::const_reverse_iterator rend = mSegments.rend();
            while(rit != rend)
            {
                if(!((*rit).isOnChannel(channel)))
                {
                    ++rit;
                    continue;
                }
                rv = (*rit).data() + (*rit).size();
                break;
            }
        }
        else if(start)
        {
            // This is sort of a weird case - check if zero bytes away
            // from current position is on channel and return start if
            // that is true. Otherwise, return NULL.
            it = getSegment(start);
            if((it == end) || !(*it).isOnChannel(channel))
            {
                rv = NULL;
            }
        }
        else
        {
            // Start is NULL, so return the very first byte on the
            // channel, or NULL.
            it = mSegments.begin();
            while((it != end) && !(*it).isOnChannel(channel))
            {
                ++it;
            }
            if(it != end)
            {
                rv = (*it).data();
            }
        }
        return rv;
    }
    if(start)
    {
        it = getSegment(start);
        if((it != end) && (*it).isOnChannel(channel))
        {
            if(delta > 0)
            {
                S32 bytes_in_segment = (*it).size() - (start - (*it).data());
                S32 local_delta = llmin(delta, bytes_in_segment);
                rv += local_delta;
                delta -= local_delta;
                ++it;
            }
            else
            {
                S32 bytes_in_segment = start - (*it).data();
                S32 local_delta = llmin(llabs(delta), bytes_in_segment);
                rv -= local_delta;
                delta += local_delta;
            }
        }
    }
    else if(delta < 0)
    {
        // start is NULL, and delta indicates seeking backwards -
        // return NULL.
        return NULL;
    }
    else
    {
        // start is NULL and delta > 0
        it = mSegments.begin();
    }
    if(delta > 0)
    {
        // At this point, we have an iterator into the segments, and
        // are seeking forward until delta is zero or we run out
        while(delta && (it != end))
        {
            if(!((*it).isOnChannel(channel)))
            {
                ++it;
                continue;
            }
            if(delta <= (*it).size())
            {
                // it's in this segment
                rv = (*it).data() + delta;
            }
            delta -= (*it).size();
            ++it;
        }
        if(delta && (it == end))
        {
            // Whoops - sought past end.
            rv = NULL;
        }
    }
    else //if(delta < 0)
    {
        // We are at the beginning of a segment, and need to search
        // backwards.
        segment_list_t::const_reverse_iterator rit(it);
        segment_list_t::const_reverse_iterator rend = mSegments.rend();
        while(delta && (rit != rend))
        {
            if(!((*rit).isOnChannel(channel)))
            {
                ++rit;
                continue;
            }
            if(llabs(delta) <= (*rit).size())
            {
                // it's in this segment.
                rv = (*rit).data() + (*rit).size() + delta;
                delta = 0;
            }
            else
            {
                delta += (*rit).size();
            }
            ++rit;
        }
        if(delta && (rit == rend))
        {
            // sought past the beginning.
            rv = NULL;
        }
    }
    return rv;
}
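
// seek() behavior, summarized from the cases above:
//   seek(ch, NULL, 0)       - first byte on channel ch, or NULL if none
//   seek(ch, (U8*)npos, 0)  - one past the last byte on channel ch, when the
//                             channel has data
//   seek(ch, start, delta)  - the address delta bytes away from start,
//                             counting only bytes on channel ch, or NULL if
//                             the seek runs off either end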

//test use only
bool LLBufferArray::takeContents(LLBufferArray& source)
{
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    LLMutexLock lock(mMutexp);
    source.lock();

    std::copy(
        source.mBuffers.begin(),
        source.mBuffers.end(),
        std::back_insert_iterator<buffer_list_t>(mBuffers));
    source.mBuffers.clear();

    std::copy(
        source.mSegments.begin(),
        source.mSegments.end(),
        std::back_insert_iterator<segment_list_t>(mSegments));
    source.mSegments.clear();
    source.mNextBaseChannel = 0;
    source.unlock();

    return true;
}

//mMutexp should be locked before calling this.
LLBufferArray::segment_iterator_t LLBufferArray::makeSegment(
    S32 channel,
    S32 len)
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);

    // start at the end of the buffers, because it is the most likely
    // to have free space.
    LLSegment segment;
    buffer_list_t::reverse_iterator it = mBuffers.rbegin();
    buffer_list_t::reverse_iterator end = mBuffers.rend();
    bool made_segment = false;
    for(; it != end; ++it)
    {
        if((*it)->createSegment(channel, len, segment))
        {
            made_segment = true;
            break;
        }
    }
    segment_iterator_t send = mSegments.end();
    if(!made_segment)
    {
        LLBuffer* buf = new LLHeapBuffer;
        mBuffers.push_back(buf);
        if(!buf->createSegment(channel, len, segment))
        {
            // failed. this should never happen.
            return send;
        }
    }

    // store and return the newly made segment
    mSegments.insert(send, segment);
    // Incrementing rbegin() and taking base() yields a forward iterator to
    // the last element of the list, i.e. the segment just inserted.
    std::list<LLSegment>::reverse_iterator rv = mSegments.rbegin();
    ++rv;
    send = rv.base();
    return send;
}

//mMutexp should be locked before calling this.
bool LLBufferArray::eraseSegment(const segment_iterator_t& erase_iter)
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);

    // Find out which buffer contains the segment, and if it is found,
    // ask it to reclaim the memory.
    bool rv = false;
    LLSegment segment(*erase_iter);
    buffer_iterator_t iter = mBuffers.begin();
    buffer_iterator_t end = mBuffers.end();
    for(; iter != end; ++iter)
    {
        // We can safely call reclaimSegment on every buffer, and once
        // it returns true, the segment was found.
        if((*iter)->reclaimSegment(segment))
        {
            rv = true;
            break;
        }
    }

    // No need to get the return value since we are not interested in
    // the iterator returned by the call.
    (void)mSegments.erase(erase_iter);
    return rv;
}

//mMutexp should be locked before calling this.
bool LLBufferArray::copyIntoBuffers(
    S32 channel,
    const U8* src,
    S32 len,
    std::vector<LLSegment>& segments)
{
    ASSERT_LLBUFFERARRAY_MUTEX_LOCKED
    LLMemType m1(LLMemType::MTYPE_IO_BUFFER);
    if(!src || !len) return false;
    S32 copied = 0;
    LLSegment segment;
    buffer_iterator_t it = mBuffers.begin();
    buffer_iterator_t end = mBuffers.end();
    for(; it != end;)
    {
        if(!(*it)->createSegment(channel, len, segment))
        {
            ++it;
            continue;
        }
        segments.push_back(segment);
        S32 bytes = llmin(segment.size(), len);
        memcpy(segment.data(), src + copied, bytes); /* Flawfinder: Ignore */
        copied += bytes;
        len -= bytes;
        if(0 == len)
        {
            break;
        }
    }
    while(len)
    {
        LLBuffer* buf = new LLHeapBuffer;
        mBuffers.push_back(buf);
        if(!buf->createSegment(channel, len, segment))
        {
            // this totally failed - bail. This is the weird corner
            // case where we 'leak' memory. No worries about an actual
            // leak - we will still reclaim the memory later, but this
            // particular buffer array is hosed for some reason.
            // This should never happen.
            return false;
        }
        segments.push_back(segment);
        memcpy(segment.data(), src + copied, segment.size()); /*Flawfinder: ignore*/
        copied += segment.size();
        len -= segment.size();
    }
    return true;
}