PageRenderTime 176ms CodeModel.GetById 20ms RepoModel.GetById 1ms app.codeStats 0ms

/mp4seek/iso.py

https://github.com/ylatuya/mp4seek
Python | 1341 lines | 1012 code | 221 blank | 108 comment | 163 complexity | e540121d78cc9dec8cecfca5ee42130b MD5 | raw file
  1. from math import ceil
  2. import struct
  3. import atoms
  4. from atoms import read_fcc, read_ulong, read_ulonglong
  5. def write_uchar(fobj, n):
  6. fobj.write(struct.pack('>B', n))
  7. def write_ulong(fobj, n):
  8. fobj.write(struct.pack('>L', n))
  9. def write_ulonglong(fobj, n):
  10. fobj.write(struct.pack('>Q', n))
  11. def write_fcc(fobj, fcc_str):
  12. # print '[wfcc]: @%d %r' % (fobj.tell(), fcc_str)
  13. fobj.write('%-4.4s' % fcc_str)
  14. def takeby(seq, n, force_tuples=False):
  15. if n == 1 and not force_tuples:
  16. return seq
  17. return [tuple(seq[i:i + n]) for i in xrange(0, len(seq), n)]
  18. def read_table(f, row_spec, entries, spec_prefix='>'):
  19. """Read a continuous region of file and unpack it into a list of
  20. tuples using the given struct specification of a single row.
  21. @param row_spec: spec describing single row of table using same
  22. syntax as in L{struct} module.
  23. @type row_spec: str
  24. @param entries: number of rows to read
  25. @type entries: int
  26. @param spec_prefix: optional specification that will be used for
  27. the whole table
  28. @type spec_prefix: str
  29. """
  30. if entries == 0:
  31. return []
  32. row_bytes = struct.calcsize('%s%s' % (spec_prefix, row_spec))
  33. data = f.read(row_bytes * entries)
  34. try:
  35. l = struct.unpack('%s%s' % (spec_prefix, row_spec * entries), data)
  36. except struct.error:
  37. raise RuntimeError('Not enough data: requested %d, read %d' %
  38. (row_bytes * entries, len(data)))
  39. per_row = len(l) / entries
  40. return takeby(l, per_row)
  41. class UnsuportedVersion(Exception):
  42. pass
  43. class FormatError(Exception):
  44. pass
  45. class CannotSelect(Exception):
  46. pass
class AttribInitializer(type):
    # Metaclass used (via Python 2 __metaclass__) by Box and friends.
    # For classes declaring a `_fields` tuple it synthesizes:
    #  * an __init__ that pops each field from the keyword arguments
    #    (defaulting to None) and stores it on the instance, chaining to
    #    the class's original __init__ or to the base class's;
    #  * a default __repr__ listing every declared field, unless the
    #    class defines its own.
    def __new__(meta, classname, bases, classdict):
        if '_fields' in classdict:
            fields = classdict['_fields']
            orig_init = classdict.pop('__init__', None)
            def __init__(self, *a, **kw):
                # Collect declared fields from keyword args (default None).
                f_dict = {}
                for f in fields:
                    f_dict[f] = kw.pop(f, None)
                if orig_init:
                    # Set fields first so the original __init__ sees them.
                    self.__dict__.update(f_dict)
                    orig_init(self, *a, **kw)
                elif bases and bases[0] != object:
                    # NOTE(review): super(self.__class__, ...) resolves
                    # relative to the *runtime* class, not the class being
                    # built -- works for this flat hierarchy.
                    super(self.__class__, self).__init__(*a, **kw)
                # (Re-)apply field values after chained initialization.
                self.__dict__.update(f_dict)
            classdict['__init__'] = __init__
            if '__repr__' not in classdict:
                def __repr__(self):
                    r = '%s(%s)' % (self.__class__.__name__,
                                    ', '.join(['%s=%r' % (n, getattr(self, n))
                                               for n in fields]))
                    return r
                classdict['__repr__'] = __repr__
        return type.__new__(meta, classname, bases, classdict)
  71. class Box(object):
  72. __metaclass__ = AttribInitializer
  73. def __init__(self, atom):
  74. self._atom = atom
  75. def get_size(self):
  76. # should be overriden in the boxes we want to be able to modify
  77. return self._atom.get_size()
  78. def get_offset(self):
  79. return self._atom.get_offset()
  80. def copy(self, *a, **kw):
  81. cls = self.__class__
  82. if getattr(self, '_fields', None):
  83. attribs = dict([(k, getattr(self, k)) for k in self._fields])
  84. attribs.update(dict([(k, kw[k]) for k in self._fields if k in kw]))
  85. else:
  86. attribs = {}
  87. return cls(self._atom, **attribs)
  88. def write(self, fobj):
  89. # print '[ b] writing:', self
  90. self._atom.write(fobj)
  91. def write_head(self, fobj):
  92. # assuming 'short' sizes for now - FIXME!
  93. # print '[ b] writing head:', self._atom
  94. a = self._atom
  95. write_ulong(fobj, self.get_size())
  96. write_fcc(fobj, a.type)
  97. if (a.extended_type):
  98. fobj.write(a.extended_type)
  99. class FullBox(Box):
  100. def tabled_size(self, body_size, loop_size):
  101. # TODO: move to a separate TableFullBox subclass?
  102. return (self._atom.head_size_ext() + body_size +
  103. len(self.table) * loop_size)
  104. def write_head(self, fobj):
  105. Box.write_head(self, fobj)
  106. a = self._atom
  107. write_ulong(fobj, (a.v & 0xff) << 24 | (a.flags & 0xffffff))
  108. class ContainerBox(Box):
  109. def __init__(self, *a, **kw):
  110. Box.__init__(self, *a, **kw)
  111. self._extra_children = []
  112. def get_size(self):
  113. # print '[>] getting size: %r' % self._atom
  114. fields = getattr(self, '_fields', [])
  115. cd = self._atom.get_children_dict()
  116. size = self._atom.head_size_ext()
  117. for k, v in cd.items():
  118. if k in fields:
  119. v = getattr(self, k)
  120. if not isinstance(v, (tuple, list)):
  121. if v is None:
  122. v = []
  123. else:
  124. v = [v]
  125. # print 'size for %r = %r' % (sum([a.get_size() for a in v]), v)
  126. size += sum([a.get_size() for a in v])
  127. size += sum([a.get_size() for a in self._extra_children])
  128. # print '[<] getting size: %r = %r' % (self._atom, size)
  129. return size
  130. def write(self, fobj):
  131. self.write_head(fobj)
  132. fields = getattr(self, '_fields', [])
  133. cd = self._atom.get_children_dict()
  134. to_write = []
  135. for k, v in cd.items():
  136. if k in fields:
  137. v = getattr(self, k)
  138. if not isinstance(v, (tuple, list)):
  139. if v is None:
  140. v = []
  141. else:
  142. v = [v]
  143. to_write.extend(v)
  144. def _get_offset(a):
  145. return a.get_offset()
  146. to_write.sort(key=_get_offset)
  147. to_write.extend(self._extra_children)
  148. # print '[ ] going to write:', \
  149. # ([(isinstance(a, Box) and a._atom.type or a.type)
  150. # for a in to_write])
  151. for ca in to_write:
  152. # print '[cb] writing:', ca
  153. ca.write(fobj)
  154. def add_extra_children(self, al):
  155. self._extra_children.extend(al)
  156. def fullboxread(f):
  157. def _with_full_atom_read_wrapper(cls, a):
  158. return f(cls, atoms.full(a))
  159. return _with_full_atom_read_wrapper
  160. def containerboxread(f):
  161. def _with_container_atom_read_wrapper(cls, a):
  162. return f(cls, atoms.container(a))
  163. return _with_container_atom_read_wrapper
  164. def ver_skip(atom, sizes):
  165. if atom.v > len(sizes) or atom.v < 0:
  166. raise UnsuportedVersion('version requested: %d' % atom.v)
  167. atom.skip(sizes[atom.v])
  168. def ver_read(atom, readers):
  169. if atom.v > len(readers) or atom.v < 0:
  170. raise UnsuportedVersion('version requested: %d' % atom.v)
  171. return readers[atom.v](atom.f)
  172. def maybe_build_atoms(atype, alist):
  173. cls = globals().get(atype)
  174. if cls and issubclass(cls, Box):
  175. return map(cls.read, alist)
  176. return alist
  177. def select_children_atoms(a, *selection):
  178. return select_atoms(a.get_children_dict(), *selection)
  179. def select_atoms(ad, *selection):
  180. """ad: atom dict
  181. selection: [(type, min_required, max_required), ...]"""
  182. selected = []
  183. for atype, req_min, req_max in selection:
  184. alist = ad.get(atype, [])
  185. found = len(alist)
  186. if ((req_min is not None and found < req_min) or
  187. (req_max is not None and found > req_max)):
  188. raise CannotSelect('requested number of atoms %r: in [%s; %s],'
  189. ' found: %d (all children: %r)' %
  190. (atype, req_min, req_max, found, ad))
  191. alist = maybe_build_atoms(atype, alist)
  192. if req_max == 1:
  193. if found == 0:
  194. selected.append(None)
  195. else:
  196. selected.append(alist[0])
  197. else:
  198. selected.append(alist)
  199. return selected
  200. def find_atom(alist, type):
  201. return [a.type for a in alist].index(type)
  202. def write_atoms(alist, f):
  203. # alist - list of Atoms or Boxes
  204. for a in alist:
  205. a.write(f)
  206. def find_samplenum_stts(stts, mt):
  207. "stts - table of the 'stts' atom; mt - media time"
  208. ctime = 0
  209. samples = 1
  210. i, n = 0, len(stts)
  211. while i < n:
  212. # print 'fsstts:', mt, ctime, stts[i], samples, ctime
  213. if mt == ctime:
  214. break
  215. count, delta = stts[i]
  216. cdelta = count * delta
  217. if mt < ctime + cdelta:
  218. samples += int(ceil((mt - ctime) / float(delta)))
  219. break
  220. ctime += cdelta
  221. samples += count
  222. i += 1
  223. return samples
  224. def find_mediatime_stts(stts, sample):
  225. ctime = 0
  226. samples = 1
  227. i, n = 0, len(stts)
  228. while i < n:
  229. count, delta = stts[i]
  230. if samples + count >= sample:
  231. return ctime + (sample - samples) * delta
  232. ctime += count * delta
  233. samples += count
  234. i += 1
  235. return ctime
  236. def find_mediatimes(stts, samples):
  237. ctime = 0
  238. total_samples = 1
  239. ret = []
  240. i, n = 0, len(stts)
  241. j, m = 0, len(samples)
  242. while i < n and j < m:
  243. count, delta = stts[i]
  244. sample = samples[j]
  245. if total_samples + count >= sample:
  246. ret.append(ctime + (sample - total_samples) * delta)
  247. j += 1
  248. continue
  249. ctime += count * delta
  250. total_samples += count
  251. i += 1
  252. return ret
  253. def find_chunknum_stsc(stsc, sample_num):
  254. current = 1 # 1-based indices!
  255. per_chunk = 0
  256. samples = 1
  257. i, n = 0, len(stsc)
  258. while i < n:
  259. # print 'fcnstsc:', sample_num, current, stsc[i], samples, per_chunk
  260. next, next_per_chunk, _sdidx = stsc[i]
  261. samples_here = (next - current) * per_chunk
  262. if samples + samples_here > sample_num:
  263. break
  264. samples += samples_here
  265. current, per_chunk = next, next_per_chunk
  266. i += 1
  267. return int((sample_num - samples) // per_chunk + current)
  268. def get_chunk_offset(stco64, chunk_num):
  269. # 1-based indices!
  270. return stco64[chunk_num - 1]
  271. class uuid(FullBox):
  272. _extended_type = None
  273. @classmethod
  274. def read(cls, a):
  275. # TODO implement a lookup of child classes based on _extended_type?
  276. raise Exception("not implemented yet")
  277. class uuid_sscurrent(uuid):
  278. _fields = ('timestamp', 'duration')
  279. _extended_type = "\x6d\x1d\x9b\x05\x42\xd5\x44\xe6\x80\xe2\x14\x1d\xaf\xf7\x57\xb2"
  280. def write(self, fobj):
  281. self.write_head(fobj)
  282. write_ulonglong(fobj, self.timestamp)
  283. write_ulonglong(fobj, self.duration)
  284. def get_size(self):
  285. size = self._atom.head_size_ext()
  286. size += 2 * 8
  287. return size
  288. @classmethod
  289. def read(cls, a):
  290. raise Exception("not implemented yet")
  291. @classmethod
  292. def make(cls, timestamp, duration):
  293. a = atoms.FullAtom(0, "uuid", 0, 1, 0, None, extended_type=cls._extended_type)
  294. s = cls(a)
  295. s.timestamp = timestamp
  296. s.duration = duration
  297. return s
  298. class uuid_ssnext(FullBox):
  299. _fields = ('entries')
  300. _extended_type = "\xd4\x80\x7e\xf2\xca\x39\x46\x95\x8e\x54\x26\xcb\x9e\x46\xa7\x9f"
  301. def write(self, fobj):
  302. self.write_head(fobj)
  303. write_uchar(fobj, len(self.entries))
  304. for ts, duration in self.entries:
  305. write_ulonglong(fobj, ts)
  306. write_ulonglong(fobj, duration)
  307. def get_size(self):
  308. size = self._atom.head_size_ext()
  309. size += 1 + (2 * 8) * len(self.entries)
  310. return size
  311. @classmethod
  312. def read(cls, a):
  313. raise Exception("not implemented yet")
  314. @classmethod
  315. def make(cls, entries):
  316. a = atoms.FullAtom(0, "uuid", 0, 1, 0, None, extended_type=cls._extended_type)
  317. s = cls(a)
  318. s.entries = entries
  319. return s
class mvhd(FullBox):
    # Movie header box; only timescale and duration are parsed/rewritten.
    _fields = (
        # 'creation_time', 'modification_time',
        'timescale', 'duration',
        # 'rate', 'volume', 'matrix', 'next_track_ID'
    )

    @classmethod
    @fullboxread
    def read(cls, a):
        """Parse timescale and duration; duration is 32-bit in version 0
        atoms and 64-bit in version 1."""
        ver_skip(a, (8, 16))    # creation/modification times
        ts = read_ulong(a.f)
        d = ver_read(a, (read_ulong, read_ulonglong))
        return cls(a, timescale=ts, duration=d)

    def write(self, fobj):
        """Re-emit the original atom bytes, substituting timescale and
        duration; the trailing 80 bytes (rate .. next_track_ID) are
        copied verbatim."""
        self.write_head(fobj)
        a = self._atom
        a.seek_to_start()
        a.skip(a.head_size_ext())
        if a.v == 0:
            fobj.write(a.read_bytes(8))     # creation/modification times
            write_ulong(fobj, self.timescale)
            write_ulong(fobj, self.duration)
            a.skip(8)                        # original timescale + duration
        elif a.v == 1:
            fobj.write(a.read_bytes(16))
            write_ulong(fobj, self.timescale)
            write_ulonglong(fobj, self.duration)
            a.skip(12)                       # original timescale + duration
        else:
            raise RuntimeError()
        fobj.write(a.read_bytes(80))
class tkhd(FullBox):
    # Track header box; exposes the track's duration and id.
    _fields = ('duration', 'id')

    @classmethod
    @fullboxread
    def read(cls, a):
        """Parse track id and duration; duration is 32-bit in version 0
        atoms and 64-bit in version 1."""
        ver_skip(a, (8, 16))    # creation/modification times
        id = read_ulong(a.f)
        a.skip(4)               # reserved
        d = ver_read(a, (read_ulong, read_ulonglong))
        return cls(a, duration=d, id=id)

    def write(self, fobj):
        """Re-emit the original atom bytes, substituting track id and
        duration; the trailing 60 fixed bytes are copied verbatim."""
        self.write_head(fobj)
        a = self._atom
        a.seek_to_start()
        a.skip(a.head_size_ext())
        if a.v == 0:
            fobj.write(a.read_bytes(8))     # creation/modification times
            write_ulong(fobj, self.id)
            a.skip(4)                        # original track id
            fobj.write(a.read_bytes(4))      # reserved
            write_ulong(fobj, self.duration)
            a.skip(4)                        # original duration
        elif a.v == 1:
            fobj.write(a.read_bytes(16))
            write_ulong(fobj, self.id)
            a.skip(4)
            fobj.write(a.read_bytes(4))
            write_ulonglong(fobj, self.duration)
            a.skip(8)
        else:
            raise RuntimeError()
        fobj.write(a.read_bytes(60))
class mdhd(FullBox):
    # Media header box; exposes the media timescale and duration.
    _fields = ('timescale', 'duration')

    @classmethod
    @fullboxread
    def read(cls, a):
        """Parse timescale and duration; duration is 32-bit in version 0
        atoms and 64-bit in version 1."""
        ver_skip(a, (8, 16))    # creation/modification times
        ts = read_ulong(a.f)
        d = ver_read(a, (read_ulong, read_ulonglong))
        return cls(a, timescale=ts, duration=d)

    def write(self, fobj):
        """Re-emit the original atom bytes, substituting timescale and
        duration; the trailing 4 bytes (language + pre_defined) are
        copied verbatim."""
        self.write_head(fobj)
        a = self._atom
        a.seek_to_start()
        a.skip(a.head_size_ext())
        if a.v == 0:
            fobj.write(a.read_bytes(8))     # creation/modification times
            write_ulong(fobj, self.timescale)
            write_ulong(fobj, self.duration)
            a.skip(8)                        # original timescale + duration
        elif a.v == 1:
            fobj.write(a.read_bytes(16))
            write_ulong(fobj, self.timescale)
            write_ulonglong(fobj, self.duration)
            a.skip(12)
        else:
            raise RuntimeError()
        fobj.write(a.read_bytes(4))
  410. class stts(FullBox):
  411. _fields = ('table',)
  412. @classmethod
  413. @fullboxread
  414. def read(cls, a):
  415. entries = read_ulong(a.f)
  416. t = read_table(a.f, 'LL', entries)
  417. return cls(a, table=t)
  418. def get_size(self):
  419. return self.tabled_size(4, 8)
  420. def write(self, fobj):
  421. self.write_head(fobj)
  422. write_ulong(fobj, len(self.table))
  423. for elt in self.table:
  424. write_ulong(fobj, elt[0])
  425. write_ulong(fobj, elt[1])
  426. class ctts(FullBox):
  427. _fields = ('table',)
  428. @classmethod
  429. @fullboxread
  430. def read(cls, a):
  431. entries = read_ulong(a.f)
  432. t = read_table(a.f, 'LL', entries)
  433. return cls(a, table=t)
  434. def get_size(self):
  435. return self.tabled_size(4, 8)
  436. def write(self, fobj):
  437. self.write_head(fobj)
  438. write_ulong(fobj, len(self.table))
  439. for elt in self.table:
  440. write_ulong(fobj, elt[0])
  441. write_ulong(fobj, elt[1])
  442. class stss(FullBox):
  443. _fields = ('table',)
  444. @classmethod
  445. @fullboxread
  446. def read(cls, a):
  447. entries = read_ulong(a.f)
  448. t = read_table(a.f, 'L', entries)
  449. return cls(a, table=t)
  450. def get_size(self):
  451. return self.tabled_size(4, 4)
  452. def write(self, fobj):
  453. self.write_head(fobj)
  454. write_ulong(fobj, len(self.table))
  455. for elt in self.table:
  456. write_ulong(fobj, elt)
  457. class stsz(FullBox):
  458. _fields = ('sample_size', 'table')
  459. @classmethod
  460. @fullboxread
  461. def read(cls, a):
  462. ss = read_ulong(a.f)
  463. entries = read_ulong(a.f)
  464. if ss == 0:
  465. t = read_table(a.f, 'L', entries)
  466. else:
  467. t = []
  468. return cls(a, sample_size=ss, table=t)
  469. def get_size(self):
  470. if self.sample_size != 0:
  471. return self._atom.head_size_ext() + 8
  472. return self.tabled_size(8, 4)
  473. def write(self, fobj):
  474. self.write_head(fobj)
  475. write_ulong(fobj, self.sample_size)
  476. write_ulong(fobj, len(self.table))
  477. if self.sample_size == 0:
  478. for elt in self.table:
  479. write_ulong(fobj, elt)
  480. class stsc(FullBox):
  481. _fields = ('table',)
  482. @classmethod
  483. @fullboxread
  484. def read(cls, a):
  485. entries = read_ulong(a.f)
  486. t = read_table(a.f, 'LLL', entries)
  487. return cls(a, table=t)
  488. def get_size(self):
  489. return self.tabled_size(4, 12)
  490. def write(self, fobj):
  491. self.write_head(fobj)
  492. write_ulong(fobj, len(self.table))
  493. for elt in self.table:
  494. write_ulong(fobj, elt[0])
  495. write_ulong(fobj, elt[1])
  496. write_ulong(fobj, elt[2])
  497. class stco(FullBox):
  498. _fields = ('table',)
  499. @classmethod
  500. @fullboxread
  501. def read(cls, a):
  502. entries = read_ulong(a.f)
  503. t = read_table(a.f, 'L', entries)
  504. return cls(a, table=t)
  505. def get_size(self):
  506. return self.tabled_size(4, 4)
  507. def write(self, fobj):
  508. self.write_head(fobj)
  509. write_ulong(fobj, len(self.table))
  510. for elt in self.table:
  511. write_ulong(fobj, elt)
  512. class co64(FullBox):
  513. _fields = ('table',)
  514. @classmethod
  515. @fullboxread
  516. def read(cls, a):
  517. entries = read_ulong(a.f)
  518. t = read_table(a.f, 'Q', entries)
  519. return cls(a, table=t)
  520. def get_size(self):
  521. return self.tabled_size(4, 8)
  522. def write(self, fobj):
  523. self.write_head(fobj)
  524. write_ulong(fobj, len(self.table))
  525. for elt in self.table:
  526. write_ulonglong(fobj, elt)
  527. class stz2(FullBox):
  528. _fields = ('field_size', 'table')
  529. @classmethod
  530. @fullboxread
  531. def read(cls, a):
  532. field_size = read_ulong(a.f) & 0xff
  533. entries = read_ulong(a.f)
  534. def read_2u4(f):
  535. b = read_bytes(f, 1)
  536. return (b >> 4) & 0x0f, b & 0x0f
  537. def flatten(l):
  538. ret = []
  539. for elt in l:
  540. ret.extend(elt)
  541. return ret
  542. if field_size == 16:
  543. t = read_table(a.f, 'H', entries)
  544. elif field_size == 8:
  545. t = read_table(a.f, 'B', entries)
  546. elif field_size == 4:
  547. t = flatten([read_2u4(a.f) for _ in xrange((entries + 1) / 2)])
  548. else:
  549. raise FormatError()
  550. return cls(a, field_size=field_size, table=t)
  551. def get_size(self):
  552. fs = self.field_size / 8.0
  553. return int(self.tabled_size(8, fs))
  554. def write(self, fobj):
  555. self.write_head(fobj)
  556. write_ulong(fobj, self.field_size & 0xff)
  557. write_ulong(fobj, len(self.table))
  558. def write_u16(f, n):
  559. fobj.write(struct.pack('>H', n))
  560. def write_u8(f, n):
  561. fobj.write(struct.pack('B', n))
  562. def write_2u4(f, n, m):
  563. fobj.write(struct.pack('B', ((n & 0x0f) << 4) | (m & 0x0f)))
  564. if field_size == 16:
  565. for elt in self.table:
  566. write_u16(fobj, elt)
  567. elif field_size == 8:
  568. for elt in self.table:
  569. write_u8(fobj, elt)
  570. elif field_size == 4:
  571. for elt in takeby(self.table, 2):
  572. write_2u4(fobj, *elt)
  573. else:
  574. raise FormatError()
  575. class btrt(Box):
  576. _fields = ('bufferSize', 'maxBitrate', 'avgBitrate')
  577. @classmethod
  578. def read(cls, a):
  579. a.seek_to_data()
  580. bufferSize = atoms.read_ulong(a.f)
  581. maxBitrate = atoms.read_ulong(a.f)
  582. avgBitrate = atoms.read_ulong(a.f)
  583. return cls(a, bufferSize=bufferSize, maxBitrate=maxBitrate,
  584. avgBitrate=avgBitrate)
  585. # from gst and mp4split, which all seem to be from ffmpeg
  586. def read_desc_len(f):
  587. bytes = 0
  588. len = 0
  589. while True:
  590. c = atoms.read_uchar(f)
  591. len <<= 7
  592. len |= c & 0x7f
  593. bytes += 1
  594. if (bytes == 4):
  595. break
  596. if not (c & 0x80):
  597. break
  598. return len
# MPEG-4 systems descriptor tags (ISO/IEC 14496-1) used by esds parsing.
MP4_ELEMENTARY_STREAM_DESCRIPTOR_TAG = 3
MP4_DECODER_CONFIG_DESCRIPTOR_TAG = 4
MP4_DECODER_SPECIFIC_DESCRIPTOR_TAG = 5
class esds(FullBox):
    # Elementary stream descriptor box; parsing follows mp4split/ffmpeg.
    _fields = ('object_type_id', 'maxBitrate', 'avgBitrate', 'data')

    @classmethod
    @fullboxread
    def read(cls, a):
        """Parse the descriptor chain: ES descriptor (optional header
        form), DecoderConfig descriptor, then DecoderSpecificInfo whose
        payload is kept raw in `data`."""
        # from mp4split
        esdesc = atoms.read_uchar(a.f)
        if esdesc == MP4_ELEMENTARY_STREAM_DESCRIPTOR_TAG:
            len = read_desc_len(a.f)            # NOTE: shadows builtin len
            stream_id = atoms.read_ushort(a.f)
            prio = atoms.read_uchar(a.f)        # stream priority (unused)
        else:
            # No ES descriptor header present; the byte just read was the
            # start of the stream id field's area.
            stream_id = atoms.read_ushort(a.f)
        tag = atoms.read_uchar(a.f)
        len = read_desc_len(a.f)
        if tag != MP4_DECODER_CONFIG_DESCRIPTOR_TAG:
            raise FormatError("can't parse esds")
        object_type_id = atoms.read_uchar(a.f)
        stream_type = atoms.read_uchar(a.f)     # streamType/upStream (unused)
        buffer_size_db = a.read_bytes(3)        # 24-bit bufferSizeDB (unused)
        maxBitrate = atoms.read_ulong(a.f)
        avgBitrate = atoms.read_ulong(a.f)
        tag = atoms.read_uchar(a.f)
        len = read_desc_len(a.f)
        if tag != MP4_DECODER_SPECIFIC_DESCRIPTOR_TAG:
            raise FormatError("can't parse esd")
        data = a.read_bytes(len)                # raw DecoderSpecificInfo
        return cls(a, object_type_id=object_type_id,
                   maxBitrate=maxBitrate, avgBitrate=avgBitrate, data=data)
class mp4a(Box):
    # TODO: base class for SampleEntry, AudioSampleEntry...
    _fields = ('index', 'channelcount', 'samplesize', 'sampleratehi', 'sampleratelo', 'extra')

    @classmethod
    def read(cls, a):
        """Parse an AudioSampleEntry ('mp4a'): 36 bytes of fixed fields
        (version 0 layout assumed) followed by optional child boxes
        such as esds."""
        a.seek_to_data()
        a.skip(6) # reserved
        idx = atoms.read_ushort(a.f)            # data_reference_index
        version = atoms.read_ushort(a.f)
        a.skip(4 + 2) # reserved
        channelcount = atoms.read_ushort(a.f)
        if channelcount == 3:
            channelcount = 6 # from mp4split
        samplesize = atoms.read_ushort(a.f)
        a.skip(4)                               # pre_defined + reserved
        sampleratehi = atoms.read_ushort(a.f)   # 16.16 fixed: integer part
        sampleratelo = atoms.read_ushort(a.f)   # 16.16 fixed: fraction part
        # FIXME: parse version != 0 samples_per_packet etc..
        # optional boxes follow
        extra = list(atoms.read_atoms(a.f, a.size - 36))
        a.seek_to_data()
        a.skip(36)
        extra = map(lambda a: maybe_build_atoms(a.type, [a])[0], extra)
        return cls(a, index=idx, channelcount=channelcount, samplesize=samplesize,
                   sampleratehi=sampleratehi, sampleratelo=sampleratelo,
                   extra=extra)
  657. class avcC(Box):
  658. _fields = ('version', 'profile', 'level', 'data')
  659. @classmethod
  660. def read(cls, a):
  661. a.seek_to_data()
  662. data = a.read_bytes(a.size - 8)
  663. version = data[0]
  664. profile = data[1]
  665. level = data[3]
  666. return cls(a, version=version, profile=profile, level=level, data=data)
class avc1(Box):
    # TODO: base class for SampleEntry, VideoSampleEntry...
    _fields = ('index', 'width', 'height', 'comp', 'extra')

    @classmethod
    def read(cls, a):
        """Parse a VideoSampleEntry ('avc1'): 86 bytes of fixed fields,
        then optional child boxes (e.g. avcC, btrt)."""
        a.seek_to_data()
        a.skip(6)                           # reserved
        idx = atoms.read_ushort(a.f)        # data_reference_index
        a.skip(4 * 4)                       # pre_defined/reserved words
        width = atoms.read_ushort(a.f)
        height = atoms.read_ushort(a.f)
        hr = a.read_bytes(4)                # horizresolution (16.16 fixed)
        vr = a.read_bytes(4)                # vertresolution (16.16 fixed)
        reserved = atoms.read_ulong(a.f)
        fc = atoms.read_ushort(a.f)         # frame_count per sample
        comp = a.read_bytes(32)             # compressorname
        depth = atoms.read_ushort(a.f)
        minusone = atoms.read_short(a.f)    # pre_defined, must be -1
        if (minusone != -1):
            raise FormatError()
        # optional boxes follow
        extra = list(atoms.read_atoms(a.f, a.size - 86))
        a.seek_to_data()
        a.skip(86)
        extra = map(lambda a: maybe_build_atoms(a.type, [a])[0], extra)
        return cls(a, index=idx, width=width, height=height, comp=comp, extra=extra)
  693. class stsd(FullBox):
  694. _fields = ('count','entries')
  695. @classmethod
  696. @fullboxread
  697. def read(cls, a):
  698. count = read_ulong(a.f)
  699. entries = []
  700. while count > 0:
  701. b = atoms.read_atom(a.f)
  702. entries.append(b)
  703. count = count - 1
  704. entries = map(lambda a: maybe_build_atoms(a.type, [a])[0], entries)
  705. return cls(a, count=count, entries=entries)
  706. class stbl(ContainerBox):
  707. _fields = ('stss', 'stsz', 'stz2', 'stco', 'co64', 'stts', 'ctts', 'stsc', 'stsd')
  708. @classmethod
  709. @containerboxread
  710. def read(cls, a):
  711. (astss, astsz, astz2, astco, aco64, astts, actts, astsc, stsd) = \
  712. select_children_atoms(a, ('stss', 0, 1), ('stsz', 0, 1),
  713. ('stz2', 0, 1), ('stco', 0, 1),
  714. ('co64', 0, 1), ('stts', 1, 1),
  715. ('ctts', 0, 1), ('stsc', 1, 1),
  716. ('stsd', 0, 1))
  717. return cls(a, stss=astss, stsz=astsz, stz2=astz2, stco=astco,
  718. co64=aco64, stts=astts, ctts=actts, stsc=astsc,
  719. stsd=stsd)
  720. class minf(ContainerBox):
  721. _fields = ('stbl',)
  722. @classmethod
  723. @containerboxread
  724. def read(cls, a):
  725. (astbl,) = select_children_atoms(a, ('stbl', 1, 1))
  726. return cls(a, stbl=astbl)
  727. class mdia(ContainerBox):
  728. _fields = ('mdhd', 'minf')
  729. @classmethod
  730. @containerboxread
  731. def read(cls, a):
  732. (amdhd, aminf) = select_children_atoms(a, ('mdhd', 1, 1),
  733. ('minf', 1, 1))
  734. return cls(a, mdhd=amdhd, minf=aminf)
  735. class trak(ContainerBox):
  736. _fields = ('tkhd', 'mdia')
  737. @classmethod
  738. @containerboxread
  739. def read(cls, a):
  740. (atkhd, amdia) = select_children_atoms(a, ('tkhd', 1, 1),
  741. ('mdia', 1, 1))
  742. return cls(a, tkhd=atkhd, mdia=amdia)
  743. class moov(ContainerBox):
  744. _fields = ('mvhd', 'trak')
  745. @classmethod
  746. @containerboxread
  747. def read(cls, a):
  748. (amvhd, traks) = select_children_atoms(a, ('mvhd', 1, 1),
  749. ('trak', 1, None))
  750. return cls(a, mvhd=amvhd, trak=traks)
  751. class ftyp(Box):
  752. _fields = ('brand', 'version')
  753. @classmethod
  754. def read(cls, a):
  755. a.seek_to_data()
  756. brand = read_fcc(a.f)
  757. v = read_ulong(a.f)
  758. return cls(a, brand=brand, version=v)
  759. class tfhd(FullBox):
  760. _fields = ('track_id', )
  761. @classmethod
  762. @fullboxread
  763. def read(cls, a):
  764. track_id = read_ulong(a.f)
  765. return cls(a, track_id=track_id)
  766. class traf(ContainerBox):
  767. _fields = ('tfhd', 'trun', 'sdtp', 'uuid')
  768. @classmethod
  769. @containerboxread
  770. def read(cls, a):
  771. (tfhd, trun, sdtp) = select_children_atoms(a, ('tfhd', 1, 1),
  772. ('trun', 1, 1),
  773. ('sdtp', 0, 1))
  774. uuid = []
  775. return cls(a, tfhd=tfhd, trun=trun, sdtp=sdtp, uuid=uuid)
  776. class moof(ContainerBox):
  777. _fields = ('mfhd', 'traf')
  778. @classmethod
  779. @containerboxread
  780. def read(cls, a):
  781. (mfhd, traf) = select_children_atoms(a, ('mfhd', 1, 1),
  782. ('traf', 1, 1))
  783. return cls(a, mfhd=mfhd, traf=traf)
  784. def read_iso_file(fobj):
  785. fobj.seek(0)
  786. al = list(atoms.read_atoms(fobj))
  787. ad = atoms.atoms_dict(al)
  788. aftyp, amoov, mdat = select_atoms(ad, ('ftyp', 1, 1), ('moov', 1, 1),
  789. ('mdat', 1, None))
  790. # print '(first mdat offset: %d)' % mdat[0].offset
  791. return aftyp, amoov, al
  792. def find_cut_trak_info(atrak, t):
  793. ts = atrak.mdia.mdhd.timescale
  794. stbl = atrak.mdia.minf.stbl
  795. mt = int(round(t * ts))
  796. # print 'media time:', mt, t, ts, t * ts
  797. # print ('finding cut for trak %r @ time %r (%r/%r)' %
  798. # (atrak._atom, t, mt, ts))
  799. sample = find_samplenum_stts(stbl.stts.table, mt)
  800. chunk = find_chunknum_stsc(stbl.stsc.table, sample)
  801. # print ('found sample: %d and chunk: %d/%r' %
  802. # (sample, chunk, stbl.stsc.table[-1]))
  803. stco64 = stbl.stco or stbl.co64
  804. chunk_offset = get_chunk_offset(stco64.table, chunk)
  805. zero_offset = get_chunk_offset(stco64.table, 1)
  806. # print 'found chunk offsets:', chunk_offset, zero_offset
  807. return sample, chunk, zero_offset, chunk_offset
  808. def cut_stco64(stco64, chunk_num, offset_change, first_chunk_delta=0):
  809. new_table = [offset - offset_change for offset in stco64[chunk_num - 1:]]
  810. if new_table and first_chunk_delta:
  811. new_table[0] = new_table[0] + first_chunk_delta
  812. return new_table
def cut_stco64_stsc(stco64, stsc, stsz2, chunk_num, sample_num, offset_change):
    """Cut the chunk-offset (stco/co64) and sample-to-chunk (stsc) tables
    so they start at 1-based sample_num, which lives in chunk_num.

    stsz2 is the per-sample size table, used to compute how many bytes of
    leading samples to skip inside the first kept chunk.  Returns
    (new_stco64_table, new_stsc_table).
    """
    new_stsc = None
    i, n = 0, len(stsc)
    # stsc rows are (first_chunk, samples_per_chunk, sample_desc_index);
    # walk the runs until the one containing chunk_num.
    current, per_chunk, sdidx = 1, 0, None
    samples = 1      # first sample number of the run starting at `current`
    while i < n:
        next, next_per_chunk, next_sdidx = stsc[i]
        if next > chunk_num:
            # chunk_num lies in the run that began at `current`; renumber
            # the remaining runs so chunk_num becomes chunk 1.
            offset = chunk_num - 1
            new_stsc = ([(1, per_chunk, sdidx)]
                        + [(c - offset, p_c, j)
                           for (c, p_c, j) in stsc[i:]])
            break
        samples += (next - current) * per_chunk
        current, per_chunk, sdidx = next, next_per_chunk, next_sdidx
        i += 1
    if new_stsc is None:
        # chunk_num belongs to the last (open-ended) run.
        new_stsc = [(1, per_chunk, sdidx)]
    # Samples of the cut chunk that precede sample_num are skipped by
    # advancing the first chunk's offset past their bytes.
    lead_samples = (sample_num - samples) % per_chunk
    bytes_offset = 0
    if lead_samples > 0:
        bytes_offset = sum(stsz2[sample_num - 1 - lead_samples :
                                 sample_num - 1])
    if lead_samples > 0:
        # The first kept chunk now holds fewer samples: give it its own
        # stsc entry and restart the original per-chunk count at chunk 2.
        fstsc = new_stsc[0]
        new_fstsc = (1, fstsc[1] - lead_samples, fstsc[2])
        if len(new_stsc) > 1 and new_stsc[1][0] == 2:
            new_stsc[0] = new_fstsc
        else:
            new_stsc[0:1] = [new_fstsc, (2, fstsc[1], fstsc[2])]
    return (cut_stco64(stco64, chunk_num, offset_change, bytes_offset),
            new_stsc)
  848. def cut_sctts(sctts, sample):
  849. samples = 1
  850. i, n = 0, len(sctts)
  851. while i < n:
  852. count, delta = sctts[i]
  853. if samples + count > sample:
  854. return [(samples + count - sample, delta)] + sctts[i+1:]
  855. samples += count
  856. i += 1
  857. return [] # ? :/
  858. def cut_stss(stss, sample):
  859. i, n = 0, len(stss)
  860. while i < n:
  861. snum = stss[i]
  862. # print 'cut_stss:', snum, sample
  863. if snum >= sample:
  864. return [s - sample + 1 for s in stss[i:]]
  865. i += 1
  866. return []
  867. def cut_stsz2(stsz2, sample):
  868. if not stsz2:
  869. return []
  870. return stsz2[sample - 1:]
def cut_trak(atrak, sample, data_offset_change):
    """Build a copy of track atrak that starts at the given 1-based
    sample, with chunk offsets shifted down by data_offset_change."""
    stbl = atrak.mdia.minf.stbl
    chunk = find_chunknum_stsc(stbl.stsc.table, sample)
    # Media time elapsed before the cut sample.
    media_time_diff = find_mediatime_stts(stbl.stts.table, sample) # - 0
    new_media_duration = atrak.mdia.mdhd.duration - media_time_diff
    """
    cut_stco64()
    cut_stsc()
    cut_stsz2()
    cut_sctts(stts)
    cut_sctts(ctts)
    cut_stss()
    """
    # Tracks carry either 32-bit or 64-bit variants of these tables.
    stco64 = stbl.stco or stbl.co64
    stsz2 = stbl.stsz or stbl.stz2
    new_stco64_t, new_stsc_t = cut_stco64_stsc(stco64.table, stbl.stsc.table,
                                               stsz2.table, chunk, sample,
                                               data_offset_change)
    new_stco64 = stco64.copy(table=new_stco64_t)
    new_stsc = stbl.stsc.copy(table=new_stsc_t)
    new_stsz2 = stsz2.copy(table=cut_stsz2(stsz2.table, sample))
    new_stts = stbl.stts.copy(table=cut_sctts(stbl.stts.table, sample))
    # ctts and stss are optional boxes.
    new_ctts = None
    if stbl.ctts:
        new_ctts = stbl.ctts.copy(table=cut_sctts(stbl.ctts.table, sample))
    new_stss = None
    if stbl.stss:
        new_stss = stbl.stss.copy(table=cut_stss(stbl.stss.table, sample))
    """
    new_mdhd = atrak.mdia.mdhd.copy()
    new_minf = atrak.mdia.minf.copy()
    new_mdia = atrak.mdia.copy()
    new_trak = atrak.copy()
    """
    # Rebuild the box tree bottom-up, storing each new table under
    # whichever variant (stco/co64, stsz/stz2) the original track used.
    stbl_attribs = dict(stts=new_stts, stsc=new_stsc)
    stbl_attribs[stbl.stco and 'stco' or 'co64'] = new_stco64
    stbl_attribs[stbl.stsz and 'stsz' or 'stz2'] = new_stsz2
    if new_ctts:
        stbl_attribs['ctts'] = new_ctts
    if new_stss:
        stbl_attribs['stss'] = new_stss
    new_stbl = stbl.copy(**stbl_attribs)
    new_minf = atrak.mdia.minf.copy(stbl=new_stbl)
    new_mdhd = atrak.mdia.mdhd.copy(duration=new_media_duration)
    new_mdia = atrak.mdia.copy(mdhd=new_mdhd, minf=new_minf)
    new_tkhd = atrak.tkhd.copy()
    new_trak = atrak.copy(tkhd=new_tkhd, mdia=new_mdia)
    return new_trak
  923. def update_offsets(atrak, data_offset_change):
  924. """
  925. cut_stco64(stco64, 1, ...) # again, after calculating new size of moov
  926. atrak.mdia.mdhd.duration = new_duration
  927. """
  928. # print 'offset updates:'
  929. # print atrak
  930. stbl = atrak.mdia.minf.stbl
  931. stco64 = stbl.stco or stbl.co64
  932. stco64.table = cut_stco64(stco64.table, 1, data_offset_change)
  933. # print atrak
  934. # print
  935. def cut_moov(amoov, t):
  936. ts = amoov.mvhd.timescale
  937. duration = amoov.mvhd.duration
  938. if t * ts >= duration:
  939. raise RuntimeError('Exceeded file duration: %r' %
  940. (duration / float(ts)))
  941. traks = amoov.trak
  942. # print 'movie timescale: %d, num tracks: %d' % (ts, len(traks))
  943. # print
  944. cut_info = map(lambda a: find_cut_trak_info(a, t), traks)
  945. # print 'cut_info:', cut_info
  946. new_data_offset = min([ci[3] for ci in cut_info])
  947. zero_offset = min([ci[2] for ci in cut_info])
  948. # print 'new offset: %d, delta: %d' % (new_data_offset,
  949. # new_data_offset - zero_offset)
  950. new_traks = map(lambda a, ci: cut_trak(a, ci[0],
  951. new_data_offset - zero_offset),
  952. traks, cut_info)
  953. new_moov = amoov.copy(mvhd=amoov.mvhd.copy(), trak=new_traks)
  954. moov_size_diff = amoov.get_size() - new_moov.get_size()
  955. # print ('moov_size_diff', moov_size_diff, amoov.get_size(),
  956. # new_moov.get_size())
  957. # print 'real moov sizes', amoov._atom.size, new_moov._atom.size
  958. # print 'new mdat start', zero_offset - moov_size_diff - 8
  959. def update_trak_duration(atrak):
  960. amdhd = atrak.mdia.mdhd
  961. new_duration = amdhd.duration * ts // amdhd.timescale # ... different
  962. # rounding? :/
  963. atrak.tkhd.duration = new_duration
  964. # print
  965. map(update_trak_duration, new_traks)
  966. map(lambda a: update_offsets(a, moov_size_diff), new_traks)
  967. return new_moov, new_data_offset - zero_offset, new_data_offset
  968. def split_atoms(f, out_f, t):
  969. aftype, amoov, alist = read_iso_file(f)
  970. t = find_nearest_syncpoint(amoov, t)
  971. # print 'nearest syncpoint:', t
  972. nmoov, delta, new_offset = cut_moov(amoov, t)
  973. write_split_header(out_f, nmoov, alist, delta)
  974. return new_offset
  975. def update_mdat_atoms(alist, size_delta):
  976. updated = []
  977. to_remove = size_delta
  978. pos = alist[0].offset
  979. for a in alist:
  980. data_size = a.size - a.head_size()
  981. size_change = min(data_size, to_remove)
  982. if size_change > 0:
  983. to_remove -= size_change
  984. new_size = real_size = a.size - size_change
  985. if a.real_size == 1:
  986. real_size = 1
  987. updated.append(atoms.Atom(new_size, 'mdat', pos, a.f,
  988. real_size=real_size))
  989. if to_remove == 0:
  990. break
  991. pos += new_size
  992. return updated
  993. def write_split_header(out_f, amoov, alist, size_delta):
  994. moov_idx = find_atom(alist, 'moov')
  995. mdat_idx = find_atom(alist, 'mdat')
  996. mdat = alist[mdat_idx]
  997. cut_offset = mdat.offset + mdat.head_size() + size_delta
  998. to_update = [a for a in alist[mdat_idx:] if a.offset < cut_offset]
  999. if [a for a in to_update if a.type != 'mdat']:
  1000. raise FormatError('"mdat" and non-"mdat" (to-update) atoms mixed')
  1001. updated_mdats = update_mdat_atoms(to_update, size_delta)
  1002. alist[moov_idx] = amoov
  1003. write_atoms(alist[:mdat_idx], out_f)
  1004. for a in updated_mdats:
  1005. write_ulong(out_f, a.real_size)
  1006. write_fcc(out_f, a.type)
  1007. if a.real_size == 1:
  1008. write_ulonglong(out_f, a.size)
  1009. def split(f, t, out_f=None):
  1010. wf = out_f
  1011. if wf is None:
  1012. from cStringIO import StringIO
  1013. wf = StringIO()
  1014. new_offset = split_atoms(f, wf, t)
  1015. return wf, new_offset
  1016. def split_and_write(in_f, out_f, t):
  1017. header_f, new_offset = split(in_f, t)
  1018. header_f.seek(0)
  1019. out_f.write(header_f.read())
  1020. in_f.seek(new_offset)
  1021. out_f.write(in_f.read())
  1022. def main(f, t):
  1023. split_and_write(f, file('/tmp/t.mp4', 'w'), t)
  1024. def find_sync_points(amoov):
  1025. ts = amoov.mvhd.timescale
  1026. traks = amoov.trak
  1027. def find_sync_samples(a):
  1028. stbl = a.mdia.minf.stbl
  1029. if not stbl.stss:
  1030. return []
  1031. stss = stbl.stss
  1032. stts = stbl.stts.table
  1033. ts = float(a.mdia.mdhd.timescale)
  1034. return map(lambda mt: mt / ts, find_mediatimes(stts, stss.table))
  1035. sync_tables = [t for t in map(find_sync_samples, traks) if t]
  1036. if sync_tables:
  1037. # ideally there should be only one sync table (from a video
  1038. # trak) - an arbitrary one will be taken otherwise...
  1039. return sync_tables[0]
  1040. return []
  1041. def find_nearest_syncpoint(amoov, t):
  1042. syncs = find_sync_points(amoov)
  1043. if not syncs:
  1044. # hardcoding duration - 0.1 sec as the farthest seek pos for now...
  1045. max_ts = amoov.mvhd.duration / float(amoov.mvhd.timescale) - 0.1
  1046. return max(0, min(t, max_ts))
  1047. found = 0
  1048. other = 0
  1049. for ss in syncs:
  1050. if ss > t:
  1051. other = ss
  1052. break
  1053. found = ss
  1054. if (abs(t - found) < abs(other - t)):
  1055. return found
  1056. return other
  1057. def get_nearest_syncpoint(f, t):
  1058. aftyp, amoov, alist = read_iso_file(f)
  1059. print find_nearest_syncpoint(amoov, t)
  1060. def get_sync_points(f):
  1061. aftyp, amoov, alist = read_iso_file(f)
  1062. return find_sync_points(amoov)
  1063. def get_debugging(f):
  1064. aftyp, amoov, alist = read_iso_file(f)
  1065. ts = amoov.mvhd.timescale
  1066. print aftyp
  1067. traks = amoov.trak
  1068. from pprint import pprint
  1069. pprint(map(lambda a: a.mdia.minf.stbl.stco, traks))
  1070. def change_chunk_offsets(amoov, data_offset):
  1071. """
  1072. @param data_offset: number of bytes to add to chunk offsets in all
  1073. traks of amoov
  1074. @type data_offset: int
  1075. """
  1076. # FIXME: make the offset direction sane in update_offsets...?
  1077. map(lambda a: update_offsets(a, - data_offset), amoov.trak)
  1078. def move_header_to_front(f):
  1079. aftype, amoov, alist = read_iso_file(f)
  1080. moov_idx = find_atom(alist, 'moov')
  1081. mdat_idx = find_atom(alist, 'mdat')
  1082. if moov_idx < mdat_idx:
  1083. # nothing to be done
  1084. return None
  1085. adict = atoms.atoms_dict(alist)
  1086. mdat = alist[mdat_idx]
  1087. new_moov_idx = mdat_idx
  1088. if 'wide' in adict:
  1089. # if 'wide' atom preceeds 'mdat', let's keep it that way
  1090. for wide in adict['wide']:
  1091. if wide.offset + wide.size == mdat.offset:
  1092. new_moov_idx -= 1
  1093. break
  1094. # for the moment assuming rewriting offsets in moov won't change
  1095. # the atoms sizes - could happen if:
  1096. # 2**32 - 1 - last_chunk_offset < moov.size
  1097. data_offset = amoov.get_size()
  1098. change_chunk_offsets(amoov, data_offset)
  1099. del alist[moov_idx]
  1100. alist[new_moov_idx:new_moov_idx] = [amoov]
  1101. return alist
  1102. def move_header_and_write(in_f, out_f):
  1103. alist = move_header_to_front(in_f)
  1104. if alist:
  1105. write_atoms(alist, out_f)
  1106. return True
  1107. return False
if __name__ == '__main__':
    # usage: iso.py FILE [TIME]
    import sys
    f = file(sys.argv[1])
    if len(sys.argv) > 2:
        # with TIME given: split the file at TIME seconds and write the
        # result to /tmp/t.mp4 (see main())
        t = float(sys.argv[2])
        main(f, t)
        # get_nearest_syncpoint(f, t)
    else:
        # without TIME: just print the sync point times of the file
        print get_sync_points(f)
        # get_debugging(f)