
/tags/release-1.0.3/mkvsplit/mkvsplitfilter.cpp

#include <strmif.h>
#include <uuids.h>
#include "mkvsplitfilter.hpp"
#include "cenumpins.hpp"
#include "mkvsplitoutpin.hpp"
#include "mkvparser.hpp"
#include "mkvparserstreamvideo.hpp"
#include "mkvparserstreamaudio.hpp"
#include <new>
#include <memory>   //std::auto_ptr (used in Filter::Open)
#include <cassert>
#include <vfwmsgs.h>
#include <process.h>
#ifdef _DEBUG
#include "iidstr.hpp"
#include "odbgstream.hpp"
using std::endl;
#endif

using std::wstring;
//using std::wistringstream;

namespace MkvSplit
{

// {ADB85B5C-AA6B-4192-85FC-B0E087E9CA37}
extern const CLSID CLSID_MkvSplit =
{ 0xadb85b5c, 0xaa6b, 0x4192, { 0x85, 0xfc, 0xb0, 0xe0, 0x87, 0xe9, 0xca, 0x37 } };

// {4CAB9818-1989-4a5f-8474-2D1F66B420F2}
extern const GUID MEDIASUBTYPE_MKV = //TODO: use std value
{ 0x4cab9818, 0x1989, 0x4a5f, { 0x84, 0x74, 0x2d, 0x1f, 0x66, 0xb4, 0x20, 0xf2 } };

Filter::Lock::Lock() : m_hMutex(0)
{
}

Filter::Lock::~Lock()
{
    Release();
}
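//Seize acquires the filter-wide mutex.  CoWaitForMultipleHandles is used
//(rather than a plain WaitForSingleObject) so that COM can keep servicing
//calls on an STA thread while we block.  RPC_S_CALLPENDING means the wait
//timed out, which we report to the caller as VFW_E_TIMEOUT.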
HRESULT Filter::Lock::Seize(Filter* pFilter, DWORD timeout)
{
    assert(m_hMutex == 0);
    assert(pFilter);

    DWORD index;

    const HRESULT hr = CoWaitForMultipleHandles(
                        0,  //wait flags
                        timeout,
                        1,
                        &pFilter->m_hMutex,
                        &index);

    //despite the "S" in this name, this is an error
    if (hr == RPC_S_CALLPENDING)
        return VFW_E_TIMEOUT;

    if (FAILED(hr))
        return hr;

    assert(index == 0);
    m_hMutex = pFilter->m_hMutex;

    return S_OK;
}

void Filter::Lock::Release()
{
    if (m_hMutex)
    {
        const BOOL b = ReleaseMutex(m_hMutex);
        assert(b);
        b;
        m_hMutex = 0;
    }
}
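//Class-factory creation entry point: aggregation is permitted only when the
//outer unknown asks for IID_IUnknown; the first reference is handed out via
//the filter's nondelegating unknown.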
HRESULT CreateInstance(
    IClassFactory* pClassFactory,
    IUnknown* pOuter,
    const IID& iid,
    void** ppv)
{
    if (ppv == 0)
        return E_POINTER;

    *ppv = 0;

    if ((pOuter != 0) && (iid != __uuidof(IUnknown)))
        return E_INVALIDARG;

    Filter* p = new (std::nothrow) Filter(pClassFactory, pOuter);

    if (p == 0)
        return E_OUTOFMEMORY;

    assert(p->m_nondelegating.m_cRef == 0);

    const HRESULT hr = p->m_nondelegating.QueryInterface(iid, ppv);

    if (SUCCEEDED(hr))
    {
        assert(*ppv);
        assert(p->m_nondelegating.m_cRef == 1);

        return S_OK;
    }

    assert(*ppv == 0);
    assert(p->m_nondelegating.m_cRef == 0);

    delete p;
    p = 0;

    return hr;
}

#pragma warning(disable:4355)  //'this' ptr in member init list
Filter::Filter(IClassFactory* pClassFactory, IUnknown* pOuter)
    : m_pClassFactory(pClassFactory),
      m_nondelegating(this),
      m_pOuter(pOuter ? pOuter : &m_nondelegating),
      m_state(State_Stopped),
      m_clock(0),
      m_hThread(0),
      m_hStop(0),
      m_pSegment(0),
      m_inpin(this)
{
    m_pClassFactory->LockServer(TRUE);

    m_hMutex = CreateMutex(0, 0, 0);
    assert(m_hMutex);  //TODO

    m_hStop = CreateEvent(0, 0, 0, 0);
    assert(m_hStop);  //TODO

    m_info.pGraph = 0;
    m_info.achName[0] = L'\0';

#ifdef _DEBUG
    odbgstream os;
    os << "mkvsrc::ctor" << endl;
#endif
}
#pragma warning(default:4355)

Filter::~Filter()
{
#ifdef _DEBUG
    odbgstream os;
    os << "mkvsrc::dtor" << endl;
#endif

#if 1
    assert(m_outpins.empty());
    assert(m_pSegment == 0);
#else
    while (!m_outpins.empty())
    {
        Outpin* p = m_outpins.back();
        assert(p);

        m_outpins.pop_back();
        delete p;
    }

    delete m_pSegment;
#endif

    assert(m_hThread == 0);

    BOOL b = CloseHandle(m_hMutex);
    assert(b);

    b = CloseHandle(m_hStop);
    assert(b);

    m_pClassFactory->LockServer(FALSE);
}
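//Init/Final start and stop the worker thread: Init resets the stop event and
//spawns the thread via _beginthreadex; Final sets the stop event, waits for
//the thread to exit, and closes its handle.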
void Filter::Init()
{
    assert(m_hThread == 0);

    const BOOL b = ResetEvent(m_hStop);
    assert(b);

    const uintptr_t h = _beginthreadex(
                            0,  //security
                            0,  //stack size
                            &Filter::ThreadProc,
                            this,
                            0,  //run immediately
                            0); //thread id

    m_hThread = reinterpret_cast<HANDLE>(h);
    assert(m_hThread);
}

void Filter::Final()
{
    if (m_hThread == 0)
        return;

    BOOL b = SetEvent(m_hStop);
    assert(b);

    const DWORD dw = WaitForSingleObject(m_hThread, INFINITE);
    assert(dw == WAIT_OBJECT_0);

    b = CloseHandle(m_hThread);
    assert(b);

    m_hThread = 0;
}

Filter::CNondelegating::CNondelegating(Filter* p)
    : m_pFilter(p),
      m_cRef(0)  //see CreateInstance
{
}

Filter::CNondelegating::~CNondelegating()
{
}
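//Nondelegating QueryInterface: a request for IUnknown must return the
//nondelegating unknown itself (so aggregation works), while the filter's
//interfaces are exposed through the IBaseFilter vtable of the Filter object.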
HRESULT Filter::CNondelegating::QueryInterface(
    const IID& iid,
    void** ppv)
{
    if (ppv == 0)
        return E_POINTER;

    IUnknown*& pUnk = reinterpret_cast<IUnknown*&>(*ppv);

    if (iid == __uuidof(IUnknown))
    {
        pUnk = this;  //must be nondelegating
    }
    else if ((iid == __uuidof(IBaseFilter)) ||
             (iid == __uuidof(IMediaFilter)) ||
             (iid == __uuidof(IPersist)))
    {
        pUnk = static_cast<IBaseFilter*>(m_pFilter);
    }
    else
    {
#if 0
        wodbgstream os;
        os << "mkvsource::filter::QI: iid=" << IIDStr(iid) << std::endl;
#endif
        pUnk = 0;
        return E_NOINTERFACE;
    }

    pUnk->AddRef();
    return S_OK;
}

ULONG Filter::CNondelegating::AddRef()
{
    return InterlockedIncrement(&m_cRef);
}

ULONG Filter::CNondelegating::Release()
{
    if (LONG n = InterlockedDecrement(&m_cRef))
        return n;

    delete m_pFilter;
    return 0;
}

HRESULT Filter::QueryInterface(const IID& iid, void** ppv)
{
    return m_pOuter->QueryInterface(iid, ppv);
}

ULONG Filter::AddRef()
{
    return m_pOuter->AddRef();
}

ULONG Filter::Release()
{
    return m_pOuter->Release();
}

HRESULT Filter::GetClassID(CLSID* p)
{
    if (p == 0)
        return E_POINTER;

    *p = CLSID_MkvSplit;
    return S_OK;
}
HRESULT Filter::Stop()
{
    //Stop is a synchronous operation: when it completes,
    //the filter is stopped.

    Lock lock;

    HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    switch (m_state)
    {
        case State_Paused:
        case State_Running:

            //Stop is synchronous.  When stop completes, all threads
            //should be stopped.  What does "stopped" mean?  In our
            //case it probably means "terminated".
            //
            //It's a bit tricky here because we hold the filter
            //lock.  If threads need to acquire the filter lock
            //then we'll have to release it.  Only the FGM can call
            //Stop, etc, so there's no problem releasing the lock
            //while Stop is executing, to allow threads to acquire
            //the filter lock temporarily.
            //
            //The streaming thread will receive an indication
            //automatically (assuming it's connected), either via
            //GetBuffer or Receive, so there's nothing this filter
            //needs to do to tell the streaming thread to stop.
            //
            //One implementation strategy is to build a
            //vector of thread handles, and then wait for a signal
            //on one of them.  When the handle is signalled
            //(meaning that the thread has terminated), we
            //remove that handle from the vector, close the
            //handle, and then wait again.  Repeat until
            //all threads have terminated.
            //
            //We also need to clean up any unused samples,
            //and decommit the allocator.  (In fact, we could
            //decommit the allocator immediately, and then wait
            //for the threads to terminate.)

            lock.Release();

            OnStop();

            hr = lock.Seize(this);
            assert(SUCCEEDED(hr));  //TODO

            break;

        case State_Stopped:
        default:
            break;
    }

    m_state = State_Stopped;
    return S_OK;
}
HRESULT Filter::Pause()
{
    //Unlike Stop(), Pause() can be asynchronous (that's why you have
    //GetState()).  We could use that here to build the samples index.

    Lock lock;

    HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    switch (m_state)
    {
        case State_Stopped:
            OnStart();
            break;

        case State_Running:
        case State_Paused:
        default:
            break;
    }

    m_state = State_Paused;
    return S_OK;
}

HRESULT Filter::Run(REFERENCE_TIME start)
{
    Lock lock;

    HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    switch (m_state)
    {
        case State_Stopped:
            OnStart();
            break;

        case State_Paused:
        case State_Running:
        default:
            break;
    }

    m_start = start;
    m_state = State_Running;

    return S_OK;
}
HRESULT Filter::GetState(
    DWORD /* timeout */ ,
    FILTER_STATE* p)
{
    if (p == 0)
        return E_POINTER;

    //The GetState timeout parameter refers not to locking the filter,
    //but rather to waiting to determine the current state.
    //A request to Stop is always synchronous (hence no timeout parameter),
    //but a request to Pause can be asynchronous, so the caller can say
    //how long he's willing to wait for the transition (to paused) to
    //complete.

    //TODO: implement a waiting scheme here.  We'll probably have to
    //use SignalObjectAndWait to atomically release the mutex and then
    //wait for the condition variable to change.
    //if (hr == VFW_E_TIMEOUT)
    //    return VFW_S_STATE_INTERMEDIATE;

    Lock lock;

    const HRESULT hr = lock.Seize(this);

    //The lock is only used for synchronization.  If Seize fails,
    //it means there's a serious problem with the filter.

    if (FAILED(hr))
        return E_FAIL;

    *p = m_state;
    return S_OK;
}
HRESULT Filter::SetSyncSource(
    IReferenceClock* clock)
{
    Lock lock;

    HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    if (m_clock)
        m_clock->Release();

    m_clock = clock;

    if (m_clock)
        m_clock->AddRef();

    return S_OK;
}

HRESULT Filter::GetSyncSource(
    IReferenceClock** pclock)
{
    if (pclock == 0)
        return E_POINTER;

    Lock lock;

    HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    IReferenceClock*& clock = *pclock;

    clock = m_clock;

    if (clock)
        clock->AddRef();

    return S_OK;
}
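//EnumPins builds a temporary array of pin pointers on the stack (the input
//pin followed by every output pin) and passes it to CEnumPins::CreateInstance
//to create the enumerator.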
HRESULT Filter::EnumPins(IEnumPins** pp)
{
    Lock lock;

    HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    const ULONG outpins_count = static_cast<ULONG>(m_outpins.size());
    const ULONG n = 1 + outpins_count;

    const size_t cb = n * sizeof(IPin*);
    IPin** const pins = (IPin**)_alloca(cb);

    IPin** pin = pins;

    *pin++ = &m_inpin;

    typedef outpins_t::iterator iter_t;

    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();

    while (i != j)
        *pin++ = *i++;

    return CEnumPins::CreateInstance(pins, n, pp);
}

HRESULT Filter::FindPin(
    LPCWSTR id1,
    IPin** pp)
{
    if (pp == 0)
        return E_POINTER;

    IPin*& p = *pp;
    p = 0;

    if (id1 == 0)
        return E_INVALIDARG;

    {
        Pin* const pPin = &m_inpin;

        const wstring& id2_ = pPin->m_id;
        const wchar_t* const id2 = id2_.c_str();

        if (wcscmp(id1, id2) == 0)  //case-sensitive
        {
            p = pPin;
            p->AddRef();

            return S_OK;
        }
    }

    typedef outpins_t::const_iterator iter_t;

    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();

    while (i != j)
    {
        Pin* const pPin = *i++;

        const wstring& id2_ = pPin->m_id;
        const wchar_t* const id2 = id2_.c_str();

        if (wcscmp(id1, id2) == 0)  //case-sensitive
        {
            p = pPin;
            p->AddRef();

            return S_OK;
        }
    }

    return VFW_E_NOT_FOUND;
}
HRESULT Filter::QueryFilterInfo(FILTER_INFO* p)
{
    if (p == 0)
        return E_POINTER;

    Lock lock;

    HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    enum { size = sizeof(p->achName)/sizeof(WCHAR) };

    const errno_t e = wcscpy_s(p->achName, size, m_info.achName);
    e;
    assert(e == 0);

    p->pGraph = m_info.pGraph;

    if (p->pGraph)
        p->pGraph->AddRef();

    return S_OK;
}

HRESULT Filter::JoinFilterGraph(
    IFilterGraph* pGraph,
    LPCWSTR name)
{
    Lock lock;

    HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    //NOTE:
    //No, do not adjust reference counts here!
    //Read the docs for the reasons why.
    //ENDNOTE.

    m_info.pGraph = pGraph;

    if (name == 0)
        m_info.achName[0] = L'\0';
    else
    {
        enum { size = sizeof(m_info.achName)/sizeof(WCHAR) };

        const errno_t e = wcscpy_s(m_info.achName, size, name);
        e;
        assert(e == 0);  //TODO
    }

    return S_OK;
}

HRESULT Filter::QueryVendorInfo(LPWSTR* pstr)
{
    if (pstr == 0)
        return E_POINTER;

    wchar_t*& str = *pstr;

    str = 0;

    return E_NOTIMPL;
}
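//Open: given the source's IAsyncReader, parse the EBML header, create and
//parse the (first) segment, create one output pin per video and audio track,
//and take ownership of the segment on success.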
HRESULT Filter::Open(IAsyncReader* pReader)
{
    assert(pReader);
    assert(m_pSegment == 0);
    //assert(!bool(m_pAllocator));
    assert(m_outpins.empty());

    __int64 result, pos;

    //TODO: must initialize header to defaults
    MkvParser::EBMLHeader h;

    result = h.Parse(pReader, pos);

    if (result < 0)  //error
        return static_cast<HRESULT>(result);

    if (result > 0)  //need more data
        return VFW_E_BUFFER_UNDERFLOW;  //require full header

    if (h.m_version > 1)
        return VFW_E_INVALID_FILE_FORMAT;

    if (h.m_maxIdLength > 8)
        return VFW_E_INVALID_FILE_FORMAT;

    if (h.m_maxSizeLength > 8)
        return VFW_E_INVALID_FILE_FORMAT;

    if (_stricmp(h.m_docType.c_str(), "matroska") != 0)
        return VFW_E_INVALID_FILE_FORMAT;

    //Just the EBML header has been consumed.  pos points
    //to start of (first) segment.

    MkvParser::Segment* p;

    result = MkvParser::Segment::CreateInstance(pReader, pos, p);

    if (result < 0)
        return static_cast<HRESULT>(result);

    if (result > 0)
        return VFW_E_BUFFER_UNDERFLOW;

    assert(p);
    std::auto_ptr<MkvParser::Segment> pSegment(p);

    result = pSegment->Parse();

    if (result < 0)
        return static_cast<HRESULT>(result);

    if (result > 0)
        return VFW_E_BUFFER_UNDERFLOW;

    const MkvParser::Tracks* const pTracks = pSegment->GetTracks();

    if (pTracks == 0)
        return VFW_E_INVALID_FILE_FORMAT;

    const MkvParser::SegmentInfo* const pInfo = pSegment->GetInfo();

    if (pInfo == 0)
        return VFW_E_INVALID_FILE_FORMAT;  //TODO: liberalize

    using MkvParser::VideoTrack;
    using MkvParser::VideoStream;
    using MkvParser::AudioTrack;
    using MkvParser::AudioStream;

    typedef TCreateOutpins<VideoTrack, VideoStream> EV;
    typedef TCreateOutpins<MkvParser::AudioTrack, MkvParser::AudioStream> EA;

    const EV ev(this, &VideoStream::CreateInstance);
    pTracks->EnumerateVideoTracks(ev);

    const EA ea(this, &AudioStream::CreateInstance);
    pTracks->EnumerateAudioTracks(ea);

    if (m_outpins.empty())
        return VFW_E_INVALID_FILE_FORMAT;  //TODO: better return value here?

    ALLOCATOR_PROPERTIES props;

    props.cbBuffer = GetMaxBufferSize();
    props.cbAlign = 1;
    props.cbPrefix = 0;
    props.cBuffers = 1;

    //HRESULT hr = pReader->RequestAllocator(0, &props, &m_pAllocator);
    //assert(SUCCEEDED(hr));  //TODO
    //assert(bool(m_pAllocator));

    m_pSegment = pSegment.release();

    return S_OK;
}
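//OnStart/OnStop bracket the stopped->paused/running transitions: OnStart
//initializes each output pin and then starts the worker thread; OnStop stops
//the worker thread and then finalizes the pins.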
void Filter::OnStart()
{
    //TODO: init inpin

    typedef outpins_t::iterator iter_t;

    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();

    while (i != j)
    {
        Outpin* const pPin = *i++;
        assert(pPin);

        pPin->Init();
    }

    Init();
}

void Filter::OnStop()
{
    Final();

    typedef outpins_t::iterator iter_t;

    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();

    while (i != j)
    {
        Outpin* const pPin = *i++;
        assert(pPin);

        pPin->Final();
    }

    //TODO: final inpin
}

int Filter::GetConnectionCount() const
{
    //filter already locked by caller

    int n = 0;

    typedef outpins_t::const_iterator iter_t;

    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();

    while (i != j)
    {
        const Outpin* const pin = *i++;
        assert(pin);

        if (pin->m_pPinConnection)
            ++n;
    }

    return n;
}
#if 0
long Filter::GetMaxBufferSize() const
{
    Lock lock;

    const HRESULT hr = lock.Seize(this);
    assert(SUCCEEDED(hr));

    long maxsize = 0;

    typedef outpins_t::const_iterator iter_t;

    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();

    while (i != j)
    {
        const Outpin* const pin = *i++;
        assert(pin);

        const long size = pin->GetBufferSize();
        assert(size >= 0);

        if (size > maxsize)
            maxsize = size;
    }

    return maxsize;
}
#endif
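//Worker-thread entry point passed to _beginthreadex; it simply forwards to
//Main() on the Filter instance supplied as the thread argument.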
unsigned Filter::ThreadProc(void* pv)
{
    Filter* const pFilter = static_cast<Filter*>(pv);
    assert(pFilter);

    return pFilter->Main();
}
#if 1
unsigned Filter::Main()
{
    //assert(bool(m_pAllocator));

    //TODO: this isn't perfect, because an EBML cluster is necessarily larger
    //than the frames it holds.
    //
    //We have been loading clusters in-total, but the size of a cluster
    //will be larger than the largest frame, which is all we calculate
    //in GetMaxBufferSize.  We could attempt to allocate a very large
    //buffer for the media sample, larger than a cluster, but this probably
    //won't work because we have no control over how large the cluster is.
    //
    //That means we're going to have to incrementally load a cluster,
    //similar to how we load the segment.  But then again, we only need
    //to wait for the few header bytes to identify the size.  The actual
    //problem is that for us to asynchronously read, we need to read
    //the entire buffer.  I don't really care about asynchronously
    //reading -- the only reason I'm doing that is because I need a way
    //to do a timed read, because I want to know whether the bytes are
    //available.  I'm just as satisfied to do a synchronous read,
    //once I know I have the data.
    //
    //Segment::Parse() returns the total number of bytes as returned
    //by IAsyncReader::Length(&avail, ...).  But if we want to do
    //an async read, we need the start and stop posn, not the total
    //number of bytes available.  Segment::m_pos should be pretty
    //close to the value of available bytes.
    //
    //We could read 1 byte from the end of the range we need
    //asynchronously, and then synchronously read all of the bytes.
    //Or we could read chunks (aligned reads, if that is more efficient),
    //and then read from the chunks.  That might be simpler because
    //we don't have to do typed reads until later.  We load page-by-page
    //as untyped bytes, and then read elements from the chunks.
    //This does mean we'll need a secondary abstraction.
    //
    //We don't really need to do this.
    //We could do reads along alignment boundaries (in fact we must,
    //even for async reads).  What Segment::Parse should pass back
    //is the pos and size of the element (instead of just pos + size,
    //which is the pos of the last byte needed).  We then read in a
    //series of pages that includes all the requested data.  (The pages
    //are aligned, so we will have read in extra data on both ends
    //of the range.)  This has the benefit that we need not compute
    //a max buffer size for reads from the input stream.  In a sense
    //we're building our own cache.

    const long size = GetMaxBufferSize();
    assert(size > 0);

    for (;;)
    {
        //const __int64 result = Parse(size);
        //if result < 0 then error
        //  handle this by announcing EOS for all streams
        //
        //if result > 0 then we need to wait for availability
        //
        //if result = 0 then we have successfully loaded a new cluster
        //const __int64 result = m_pSegment->Parse();

        //We know how many bytes we need available.
        //__int64 pos, size;
        //hr = pSegment->Parse(pos, size);
        //if failed(hr) return;
        //
        //pSegment is bound to the cache.  It doesn't know anything
        //about the input's IAsyncReader interface.  This thread
        //is the only thread that knows about that.
        //
        //Here we can load the cache one page at a time.
        //We wait for the read of a single page to complete.
        //If the read times out, then we attempt to read that page again.
        //If the read is successful, we move on to the next page,
        //and read that.
        //When all pages in this request have been swapped in, then
        //we try the parse again: since all pages are now in cache,
        //Segment::Parse should return 0 (meaning success).
        //That means we have loaded a new cluster.  We announce this
        //fact by setting an event that the streaming threads are
        //waiting for.  The streaming threads consume blocks in this
        //newly-loaded cluster.  This worker thread goes back to the
        //top of the loop, and calls Segment::Parse again.
        //
        //We need to convert from the (pos, size) 2-tuple to a list
        //of pages.  We can then loop over the pages.
        //If we give our cache reader a special operation that says
        //"load all of the data in (pos, size) into cache", this will
        //correspond to a cluster.  It's probably OK to keep a
        //cluster in memory (assuming clusters aren't too large).
        //We can then have the streaming threads read using
        //the cache reader in the normal way (by calling SyncRead);
        //since the pages are in cache the blocks will be read
        //quickly.  If the network I/O happens faster than the
        //playback rate, that would be wasteful, since the cluster
        //from which the streaming threads are reading will have
        //an earlier timecode, but if the playback rate is faster
        //than network I/O, then a cached read is the best we
        //can do (but really, we don't ever want to be in this
        //place).

        //Maybe the easiest solution is for the worker thread
        //to not do anything special when it reaches the end
        //of what's available.  If Segment::Parse returns a value
        //greater than 0, then we've reached the end of what we
        //can play, so we should just report EC_STARVATION immediately.
        //If the worker thread is reading faster than the streaming
        //thread, then there's no problem.
        //So maybe the whole business of having a worker thread
        //is misguided, since we want it to be the streaming thread
        //that detects whether we've run out of bytes available
        //for streaming.
        //
        //On the other hand, the streaming thread could signal that
        //it's waiting for a new cluster.  If the worker thread
        //reaches the end of what's available, then it can check
        //whether a streaming thread has signalled that it's waiting
        //for a new cluster.  Only if a streaming thread has signalled
        //would the worker thread give up and signal EC_STARVATION;
        //if the streaming threads are earlier in the stream, then
        //they wouldn't signal, because the next cluster would always
        //be there.  On the other hand, if a streaming thread reaches
        //a point where there are no more clusters, then that's still
        //the end-of-the-line (at least temporarily), and maybe it
        //doesn't make any sense for it to bother signalling the
        //worker thread, since it could signal EC_STARVATION just
        //as easily.
    }
}
#else
unsigned Filter::Main()
{
    std::vector<HANDLE> v;

    v.reserve(1 + m_outpins.size());

    v.push_back(m_hStop);

    typedef outpins_t::iterator iter_t;

    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();

    while (i != j)
    {
        Outpin* const pPin = *i++;
        assert(pPin);

        const HANDLE h = pPin->m_hSampleCanBePopulated;
        assert(h);

        v.push_back(h);
    }

    const HANDLE* const hh = &v[0];
    const DWORD n = static_cast<DWORD>(v.size());

    const DWORD dwTimeout = INFINITE;

    for (;;)
    {
        //We need to request a sample and pass it to segment::parse;
        //segment::parse can use the sample to determine cluster boundaries.
        //
        //Call IAsyncReader::WaitForNext with a 1 sec timeout.
        //If the read times out, check for the stop bit:
        //  if the stop bit is set, then return;
        //  otherwise go back and re-attempt the read
        //  (I hate having to poll this way, but I don't know what else to do).
        //Otherwise (not a read timeout):
        //  parse the new cluster;
        //  wait for outpins to signal readiness to receive a frame;
        //  we'll have to determine the pin's place in the stream:
        //    if the pin is still feeding off the frame we just read,
        //    then we can give it another block (because it's loaded);
        //    otherwise, if the pin has already consumed all of its frames
        //    in this block, then we can't service its desire for a new
        //    frame until we load another cluster;
        //  it might also be the case that no pins are ready to consume
        //  a new frame, so we can immediately wait for another frame;
        //  we have a problem here - if a pin wants a frame, and we have it
        //  loaded, then we shouldn't delay giving it a frame because
        //  we're waiting to load the *next* cluster;
        //  if all streams have consumed all frames in the curr cluster,
        //  then loop back to the top and wait for the next cluster.

        //We could change the PopulateSample method to work off of
        //clusters and blocks within clusters, instead of flattening
        //the block hierarchy as we do now.  A streaming thread could
        //signal its desire for a new cluster, when it has exhausted
        //the supply of blocks for this track on the current cluster.
        //When the worker thread has a new cluster, it can wait for
        //a signal from the streaming threads.  The streaming threads
        //consume the blocks on the new cluster, and then signal when
        //they have consumed all of the blocks.
        //
        //It would be nice if we knew here how far behind the slowest
        //streaming thread is.  If the worker thread is.
        //
        //Whenever the worker thread creates a new cluster, it
        //signals availability of the new cluster.  The streaming
        //thread can wait for availability of the new cluster.
        //(This is similar to what we do already, with media
        //samples.)  When the streaming thread wakes up (because a
        //new cluster has been announced), it can enter the critical
        //section and check whether this cluster is of interest.
        //If not, it goes back to sleep and waits for another signal;
        //otherwise, it consumes blocks from this cluster.
        //
        //The only time a streaming thread would wait is because
        //it ran out of clusters.  The only time the worker thread
        //would signal is because a new cluster became available.
        //The worker thread need not wait for availability of a
        //media sample; that is strictly the concern of the streaming
        //thread.  It doesn't seem like the worker thread would need
        //to wait for anything besides availability of new data.
        //
        //We don't necessarily want to fall off of the end of the
        //queue, since then we'd lose our place.  We'll have to
        //conditionally check whether a new cluster is available
        //before navigating to the next cluster.

        const DWORD dw = WaitForMultipleObjects(n, hh, 0, dwTimeout);

#if 0
        if (dw == WAIT_TIMEOUT)
        {
            Lock lock;

            const HRESULT hr = lock.Seize(this);
            assert(SUCCEEDED(hr));

            const __int64 result = m_pSegment->Parse();
            assert(result == 0);

            if (m_pSegment->Unparsed() <= 0)
                dwTimeout = INFINITE;

            continue;
        }
#endif

        assert(dw >= WAIT_OBJECT_0);
        assert(dw < (WAIT_OBJECT_0 + n));

        if (dw == WAIT_OBJECT_0)  //hStop
            return 0;

        const DWORD idx = dw - (WAIT_OBJECT_0 + 1);
        assert(idx < m_outpins.size());

        PopulateSamples(hh + 1, idx);
    }
}
#endif
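//PopulateSamples services the output pin whose event just signalled (idx),
//then polls the remaining pins' events with a zero timeout and services each
//one that is already signalled, returning as soon as no further pin is ready.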
void Filter::PopulateSamples(const HANDLE* hh_begin, DWORD idx)
{
    //idx represents the pin that just signalled

    for (;;)
    {
        Outpin* const pPin = m_outpins[idx];
        assert(pPin);

        pPin->PopulateSample();

        if (++idx >= m_outpins.size())
            return;

        const HANDLE* const hh = hh_begin + idx;
        const DWORD n = static_cast<DWORD>(m_outpins.size()) - idx;

        const DWORD dw = WaitForMultipleObjects(n, hh, 0, 0);

        if (dw == WAIT_TIMEOUT)
            return;

        assert(dw >= WAIT_OBJECT_0);
        assert(dw < (WAIT_OBJECT_0 + n));

        idx += dw - WAIT_OBJECT_0;
    }
}
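//When the input pin is disconnected, tear everything down: disconnect and
//destroy every output pin, then delete the parsed segment.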
HRESULT Filter::OnDisconnectInpin()
{
    while (!m_outpins.empty())
    {
        Outpin* const pPin = m_outpins.back();
        assert(pPin);

        if (IPin* pPinConnection = pPin->m_pPinConnection)
        {
            assert(m_info.pGraph);

            HRESULT hr = m_info.pGraph->Disconnect(pPinConnection);
            assert(SUCCEEDED(hr));

            hr = m_info.pGraph->Disconnect(pPin);
            assert(SUCCEEDED(hr));
        }

        m_outpins.pop_back();
        delete pPin;
    }

    delete m_pSegment;
    m_pSegment = 0;

    return S_OK;
}

}  //end namespace MkvSplit