You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2400 lines
103 KiB

4 years ago
  1. """
  2. olefile (formerly OleFileIO_PL)
  3. Module to read/write Microsoft OLE2 files (also called Structured Storage or
  4. Microsoft Compound Document File Format), such as Microsoft Office 97-2003
  5. documents, Image Composer and FlashPix files, Outlook messages, ...
  6. This version is compatible with Python 2.7 and 3.4+
  7. Project website: https://www.decalage.info/olefile
  8. olefile is copyright (c) 2005-2018 Philippe Lagadec
  9. (https://www.decalage.info)
  10. olefile is based on the OleFileIO module from the PIL library v1.1.7
  11. See: http://www.pythonware.com/products/pil/index.htm
  12. and http://svn.effbot.org/public/tags/pil-1.1.7/PIL/OleFileIO.py
  13. The Python Imaging Library (PIL) is
  14. Copyright (c) 1997-2009 by Secret Labs AB
  15. Copyright (c) 1995-2009 by Fredrik Lundh
  16. See source code and LICENSE.txt for information on usage and redistribution.
  17. """
  18. # Since OleFileIO_PL v0.45, only Python 2.7 and 3.4+ are supported
  19. # This import enables print() as a function rather than a keyword
  20. # (main requirement to be compatible with Python 3.x)
  21. # The comment on the line below should be printed on Python 2.5 or older:
  22. from __future__ import print_function # This version of olefile requires Python 2.7 or 3.4+.
  23. #--- LICENSE ------------------------------------------------------------------
  24. # olefile (formerly OleFileIO_PL) is copyright (c) 2005-2018 Philippe Lagadec
  25. # (https://www.decalage.info)
  26. #
  27. # All rights reserved.
  28. #
  29. # Redistribution and use in source and binary forms, with or without modification,
  30. # are permitted provided that the following conditions are met:
  31. #
  32. # * Redistributions of source code must retain the above copyright notice, this
  33. # list of conditions and the following disclaimer.
  34. # * Redistributions in binary form must reproduce the above copyright notice,
  35. # this list of conditions and the following disclaimer in the documentation
  36. # and/or other materials provided with the distribution.
  37. #
  38. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
  39. # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  40. # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  41. # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  42. # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  43. # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  44. # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  45. # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  46. # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  47. # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  48. # ----------
  49. # PIL License:
  50. #
  51. # olefile is based on source code from the OleFileIO module of the Python
  52. # Imaging Library (PIL) published by Fredrik Lundh under the following license:
  53. # The Python Imaging Library (PIL) is
  54. # Copyright (c) 1997-2009 by Secret Labs AB
  55. # Copyright (c) 1995-2009 by Fredrik Lundh
  56. #
  57. # By obtaining, using, and/or copying this software and/or its associated
  58. # documentation, you agree that you have read, understood, and will comply with
  59. # the following terms and conditions:
  60. #
  61. # Permission to use, copy, modify, and distribute this software and its
  62. # associated documentation for any purpose and without fee is hereby granted,
  63. # provided that the above copyright notice appears in all copies, and that both
  64. # that copyright notice and this permission notice appear in supporting
  65. # documentation, and that the name of Secret Labs AB or the author(s) not be used
  66. # in advertising or publicity pertaining to distribution of the software
  67. # without specific, written prior permission.
  68. #
  69. # SECRET LABS AB AND THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
  70. # SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
  71. # IN NO EVENT SHALL SECRET LABS AB OR THE AUTHORS BE LIABLE FOR ANY SPECIAL,
  72. # INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
  73. # LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
  74. # OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  75. # PERFORMANCE OF THIS SOFTWARE.
# Module release metadata:
__date__ = "2018-09-09"
__version__ = '0.46'
__author__ = "Philippe Lagadec"

# Public API of the module: names exported by "from olefile import *"
# (main entry points, magic constants, storage types, sector/directory IDs,
# and defect levels used by OleFileIO._raise_defect):
__all__ = ['isOleFile', 'OleFileIO', 'OleMetadata', 'enable_logging',
           'MAGIC', 'STGTY_EMPTY', 'KEEP_UNICODE_NAMES',
           'STGTY_STREAM', 'STGTY_STORAGE', 'STGTY_ROOT', 'STGTY_PROPERTY',
           'STGTY_LOCKBYTES', 'MINIMAL_OLEFILE_SIZE',
           'DEFECT_UNSURE', 'DEFECT_POTENTIAL', 'DEFECT_INCORRECT',
           'DEFECT_FATAL', 'DEFAULT_PATH_ENCODING',
           'MAXREGSECT', 'DIFSECT', 'FATSECT', 'ENDOFCHAIN', 'FREESECT',
           'MAXREGSID', 'NOSTREAM', 'UNKNOWN_SIZE', 'WORD_CLSID'
           ]
  88. import io
  89. import sys
  90. import struct, array, os.path, datetime, logging
  91. #=== COMPATIBILITY WORKAROUNDS ================================================
  92. # For Python 3.x, need to redefine long as int:
  93. if str is not bytes:
  94. long = int
  95. # Need to make sure we use xrange both on Python 2 and 3.x:
  96. try:
  97. # on Python 2 we need xrange:
  98. iterrange = xrange
  99. except:
  100. # no xrange, for Python 3 it was renamed as range:
  101. iterrange = range
  102. #[PL] workaround to fix an issue with array item size on 64 bits systems:
  103. if array.array('L').itemsize == 4:
  104. # on 32 bits platforms, long integers in an array are 32 bits:
  105. UINT32 = 'L'
  106. elif array.array('I').itemsize == 4:
  107. # on 64 bits platforms, integers in an array are 32 bits:
  108. UINT32 = 'I'
  109. elif array.array('i').itemsize == 4:
  110. # On 64 bit Jython, signed integers ('i') are the only way to store our 32
  111. # bit values in an array in a *somewhat* reasonable way, as the otherwise
  112. # perfectly suited 'H' (unsigned int, 32 bits) results in a completely
  113. # unusable behaviour. This is most likely caused by the fact that Java
  114. # doesn't have unsigned values, and thus Jython's "array" implementation,
  115. # which is based on "jarray", doesn't have them either.
  116. # NOTE: to trick Jython into converting the values it would normally
  117. # interpret as "signed" into "unsigned", a binary-and operation with
  118. # 0xFFFFFFFF can be used. This way it is possible to use the same comparing
  119. # operations on all platforms / implementations. The corresponding code
  120. # lines are flagged with a 'JYTHON-WORKAROUND' tag below.
  121. UINT32 = 'i'
  122. else:
  123. raise ValueError('Need to fix a bug with 32 bit arrays, please contact author...')
  124. #[PL] These workarounds were inspired from the Path module
  125. # (see http://www.jorendorff.com/articles/python/path/)
  126. try:
  127. basestring
  128. except NameError:
  129. basestring = str
  130. #[PL] Experimental setting: if True, OLE filenames will be kept in Unicode
  131. # if False (default PIL behaviour), all filenames are converted to Latin-1.
  132. KEEP_UNICODE_NAMES = True
  133. if sys.version_info[0] < 3:
  134. # On Python 2.x, the default encoding for path names is UTF-8:
  135. DEFAULT_PATH_ENCODING = 'utf-8'
  136. else:
  137. # On Python 3.x, the default encoding for path names is Unicode (None):
  138. DEFAULT_PATH_ENCODING = None
  139. # === LOGGING =================================================================
  140. def get_logger(name, level=logging.CRITICAL+1):
  141. """
  142. Create a suitable logger object for this module.
  143. The goal is not to change settings of the root logger, to avoid getting
  144. other modules' logs on the screen.
  145. If a logger exists with same name, reuse it. (Else it would have duplicate
  146. handlers and messages would be doubled.)
  147. The level is set to CRITICAL+1 by default, to avoid any logging.
  148. """
  149. # First, test if there is already a logger with the same name, else it
  150. # will generate duplicate messages (due to duplicate handlers):
  151. if name in logging.Logger.manager.loggerDict:
  152. #NOTE: another less intrusive but more "hackish" solution would be to
  153. # use getLogger then test if its effective level is not default.
  154. logger = logging.getLogger(name)
  155. # make sure level is OK:
  156. logger.setLevel(level)
  157. return logger
  158. # get a new logger:
  159. logger = logging.getLogger(name)
  160. # only add a NullHandler for this logger, it is up to the application
  161. # to configure its own logging:
  162. logger.addHandler(logging.NullHandler())
  163. logger.setLevel(level)
  164. return logger
# a global logger object used for debugging:
# (silent by default, see get_logger and enable_logging)
log = get_logger('olefile')


def enable_logging():
    """
    Enable logging for this module (disabled by default).
    This will set the module-specific logger level to NOTSET, which
    means the main application controls the actual logging level.
    """
    log.setLevel(logging.NOTSET)
#=== CONSTANTS ===============================================================

#: magic bytes that should be at the beginning of every OLE file:
MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'

#[PL]: added constants for Sector IDs (from AAF specifications)
# (special reserved values found in the FAT; any smaller value is a regular
# sector index)
MAXREGSECT = 0xFFFFFFFA  #: (-6) maximum SECT
DIFSECT = 0xFFFFFFFC  #: (-4) denotes a DIFAT sector in a FAT
FATSECT = 0xFFFFFFFD  #: (-3) denotes a FAT sector in a FAT
ENDOFCHAIN = 0xFFFFFFFE  #: (-2) end of a virtual stream chain
FREESECT = 0xFFFFFFFF  #: (-1) unallocated sector

#[PL]: added constants for Directory Entry IDs (from AAF specifications)
MAXREGSID = 0xFFFFFFFA  #: (-6) maximum directory entry ID
NOSTREAM = 0xFFFFFFFF  #: (-1) unallocated directory entry

#[PL] object types in storage (from AAF specifications)
STGTY_EMPTY = 0  #: empty directory entry
STGTY_STORAGE = 1  #: element is a storage object
STGTY_STREAM = 2  #: element is a stream object
STGTY_LOCKBYTES = 3  #: element is an ILockBytes object
STGTY_PROPERTY = 4  #: element is an IPropertyStorage object
STGTY_ROOT = 5  #: element is a root storage

# Unknown size for a stream (used by OleStream):
UNKNOWN_SIZE = 0x7FFFFFFF

#
# --------------------------------------------------------------------
# property types
# (VT_* codes used in OLE property set streams such as
# \x05SummaryInformation)
VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6;
VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11;
VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17;
VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23;
VT_VOID=24; VT_HRESULT=25; VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28;
VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64;
VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68;
VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72;
VT_VECTOR=0x1000;

# map property id to name (for debugging purposes)
# VT = {}
# for keyword, var in list(vars().items()):
#     if keyword[:3] == "VT_":
#         VT[var] = keyword

#
# --------------------------------------------------------------------
# Some common document types (root.clsid fields)
WORD_CLSID = "00020900-0000-0000-C000-000000000046"
#TODO: check Excel, PPT, ...

#[PL]: Defect levels to classify parsing errors - see OleFileIO._raise_defect()
DEFECT_UNSURE = 10  # a case which looks weird, but not sure it's a defect
DEFECT_POTENTIAL = 20  # a potential defect
DEFECT_INCORRECT = 30  # an error according to specifications, but parsing
                       # can go on
DEFECT_FATAL = 40  # an error which cannot be ignored, parsing is
                   # impossible

# Minimal size of an empty OLE file, with 512-bytes sectors = 1536 bytes
# (this is used in isOleFile and OleFile.open)
MINIMAL_OLEFILE_SIZE = 1536
  227. #=== FUNCTIONS ===============================================================
  228. def isOleFile (filename):
  229. """
  230. Test if a file is an OLE container (according to the magic bytes in its header).
  231. .. note::
  232. This function only checks the first 8 bytes of the file, not the
  233. rest of the OLE structure.
  234. .. versionadded:: 0.16
  235. :param filename: filename, contents or file-like object of the OLE file (string-like or file-like object)
  236. - if filename is a string smaller than 1536 bytes, it is the path
  237. of the file to open. (bytes or unicode string)
  238. - if filename is a string longer than 1535 bytes, it is parsed
  239. as the content of an OLE file in memory. (bytes type only)
  240. - if filename is a file-like object (with read and seek methods),
  241. it is parsed as-is.
  242. :type filename: bytes or str or unicode or file
  243. :returns: True if OLE, False otherwise.
  244. :rtype: bool
  245. """
  246. # check if filename is a string-like or file-like object:
  247. if hasattr(filename, 'read'):
  248. # file-like object: use it directly
  249. header = filename.read(len(MAGIC))
  250. # just in case, seek back to start of file:
  251. filename.seek(0)
  252. elif isinstance(filename, bytes) and len(filename) >= MINIMAL_OLEFILE_SIZE:
  253. # filename is a bytes string containing the OLE file to be parsed:
  254. header = filename[:len(MAGIC)]
  255. else:
  256. # string-like object: filename of file on disk
  257. with open(filename, 'rb') as fp:
  258. header = fp.read(len(MAGIC))
  259. if header == MAGIC:
  260. return True
  261. else:
  262. return False
  263. if bytes is str:
  264. # version for Python 2.x
  265. def i8(c):
  266. return ord(c)
  267. else:
  268. # version for Python 3.x
  269. def i8(c):
  270. return c if c.__class__ is int else c[0]
  271. def i16(c, o = 0):
  272. """
  273. Converts a 2-bytes (16 bits) string to an integer.
  274. :param c: string containing bytes to convert
  275. :param o: offset of bytes to convert in string
  276. """
  277. return struct.unpack("<H", c[o:o+2])[0]
  278. def i32(c, o = 0):
  279. """
  280. Converts a 4-bytes (32 bits) string to an integer.
  281. :param c: string containing bytes to convert
  282. :param o: offset of bytes to convert in string
  283. """
  284. return struct.unpack("<I", c[o:o+4])[0]
  285. def _clsid(clsid):
  286. """
  287. Converts a CLSID to a human-readable string.
  288. :param clsid: string of length 16.
  289. """
  290. assert len(clsid) == 16
  291. # if clsid is only made of null bytes, return an empty string:
  292. # (PL: why not simply return the string with zeroes?)
  293. if not clsid.strip(b"\0"):
  294. return ""
  295. return (("%08X-%04X-%04X-%02X%02X-" + "%02X" * 6) %
  296. ((i32(clsid, 0), i16(clsid, 4), i16(clsid, 6)) +
  297. tuple(map(i8, clsid[8:16]))))
  298. def filetime2datetime(filetime):
  299. """
  300. convert FILETIME (64 bits int) to Python datetime.datetime
  301. """
  302. # TODO: manage exception when microseconds is too large
  303. # inspired from https://code.activestate.com/recipes/511425-filetime-to-datetime/
  304. _FILETIME_null_date = datetime.datetime(1601, 1, 1, 0, 0, 0)
  305. #log.debug('timedelta days=%d' % (filetime//(10*1000000*3600*24)))
  306. return _FILETIME_null_date + datetime.timedelta(microseconds=filetime//10)
  307. #=== CLASSES ==================================================================
  308. class OleMetadata:
  309. """
  310. class to parse and store metadata from standard properties of OLE files.
  311. Available attributes:
  312. codepage, title, subject, author, keywords, comments, template,
  313. last_saved_by, revision_number, total_edit_time, last_printed, create_time,
  314. last_saved_time, num_pages, num_words, num_chars, thumbnail,
  315. creating_application, security, codepage_doc, category, presentation_target,
  316. bytes, lines, paragraphs, slides, notes, hidden_slides, mm_clips,
  317. scale_crop, heading_pairs, titles_of_parts, manager, company, links_dirty,
  318. chars_with_spaces, unused, shared_doc, link_base, hlinks, hlinks_changed,
  319. version, dig_sig, content_type, content_status, language, doc_version
  320. Note: an attribute is set to None when not present in the properties of the
  321. OLE file.
  322. References for SummaryInformation stream:
  323. - https://msdn.microsoft.com/en-us/library/dd942545.aspx
  324. - https://msdn.microsoft.com/en-us/library/dd925819%28v=office.12%29.aspx
  325. - https://msdn.microsoft.com/en-us/library/windows/desktop/aa380376%28v=vs.85%29.aspx
  326. - https://msdn.microsoft.com/en-us/library/aa372045.aspx
  327. - http://sedna-soft.de/articles/summary-information-stream/
  328. - https://poi.apache.org/apidocs/org/apache/poi/hpsf/SummaryInformation.html
  329. References for DocumentSummaryInformation stream:
  330. - https://msdn.microsoft.com/en-us/library/dd945671%28v=office.12%29.aspx
  331. - https://msdn.microsoft.com/en-us/library/windows/desktop/aa380374%28v=vs.85%29.aspx
  332. - https://poi.apache.org/apidocs/org/apache/poi/hpsf/DocumentSummaryInformation.html
  333. new in version 0.25
  334. """
  335. # attribute names for SummaryInformation stream properties:
  336. # (ordered by property id, starting at 1)
  337. SUMMARY_ATTRIBS = ['codepage', 'title', 'subject', 'author', 'keywords', 'comments',
  338. 'template', 'last_saved_by', 'revision_number', 'total_edit_time',
  339. 'last_printed', 'create_time', 'last_saved_time', 'num_pages',
  340. 'num_words', 'num_chars', 'thumbnail', 'creating_application',
  341. 'security']
  342. # attribute names for DocumentSummaryInformation stream properties:
  343. # (ordered by property id, starting at 1)
  344. DOCSUM_ATTRIBS = ['codepage_doc', 'category', 'presentation_target', 'bytes', 'lines', 'paragraphs',
  345. 'slides', 'notes', 'hidden_slides', 'mm_clips',
  346. 'scale_crop', 'heading_pairs', 'titles_of_parts', 'manager',
  347. 'company', 'links_dirty', 'chars_with_spaces', 'unused', 'shared_doc',
  348. 'link_base', 'hlinks', 'hlinks_changed', 'version', 'dig_sig',
  349. 'content_type', 'content_status', 'language', 'doc_version']
  350. def __init__(self):
  351. """
  352. Constructor for OleMetadata
  353. All attributes are set to None by default
  354. """
  355. # properties from SummaryInformation stream
  356. self.codepage = None
  357. self.title = None
  358. self.subject = None
  359. self.author = None
  360. self.keywords = None
  361. self.comments = None
  362. self.template = None
  363. self.last_saved_by = None
  364. self.revision_number = None
  365. self.total_edit_time = None
  366. self.last_printed = None
  367. self.create_time = None
  368. self.last_saved_time = None
  369. self.num_pages = None
  370. self.num_words = None
  371. self.num_chars = None
  372. self.thumbnail = None
  373. self.creating_application = None
  374. self.security = None
  375. # properties from DocumentSummaryInformation stream
  376. self.codepage_doc = None
  377. self.category = None
  378. self.presentation_target = None
  379. self.bytes = None
  380. self.lines = None
  381. self.paragraphs = None
  382. self.slides = None
  383. self.notes = None
  384. self.hidden_slides = None
  385. self.mm_clips = None
  386. self.scale_crop = None
  387. self.heading_pairs = None
  388. self.titles_of_parts = None
  389. self.manager = None
  390. self.company = None
  391. self.links_dirty = None
  392. self.chars_with_spaces = None
  393. self.unused = None
  394. self.shared_doc = None
  395. self.link_base = None
  396. self.hlinks = None
  397. self.hlinks_changed = None
  398. self.version = None
  399. self.dig_sig = None
  400. self.content_type = None
  401. self.content_status = None
  402. self.language = None
  403. self.doc_version = None
  404. def parse_properties(self, olefile):
  405. """
  406. Parse standard properties of an OLE file, from the streams
  407. ``\\x05SummaryInformation`` and ``\\x05DocumentSummaryInformation``,
  408. if present.
  409. Properties are converted to strings, integers or python datetime objects.
  410. If a property is not present, its value is set to None.
  411. """
  412. # first set all attributes to None:
  413. for attrib in (self.SUMMARY_ATTRIBS + self.DOCSUM_ATTRIBS):
  414. setattr(self, attrib, None)
  415. if olefile.exists("\x05SummaryInformation"):
  416. # get properties from the stream:
  417. # (converting timestamps to python datetime, except total_edit_time,
  418. # which is property #10)
  419. props = olefile.getproperties("\x05SummaryInformation",
  420. convert_time=True, no_conversion=[10])
  421. # store them into this object's attributes:
  422. for i in range(len(self.SUMMARY_ATTRIBS)):
  423. # ids for standards properties start at 0x01, until 0x13
  424. value = props.get(i+1, None)
  425. setattr(self, self.SUMMARY_ATTRIBS[i], value)
  426. if olefile.exists("\x05DocumentSummaryInformation"):
  427. # get properties from the stream:
  428. props = olefile.getproperties("\x05DocumentSummaryInformation",
  429. convert_time=True)
  430. # store them into this object's attributes:
  431. for i in range(len(self.DOCSUM_ATTRIBS)):
  432. # ids for standards properties start at 0x01, until 0x13
  433. value = props.get(i+1, None)
  434. setattr(self, self.DOCSUM_ATTRIBS[i], value)
  435. def dump(self):
  436. """
  437. Dump all metadata, for debugging purposes.
  438. """
  439. print('Properties from SummaryInformation stream:')
  440. for prop in self.SUMMARY_ATTRIBS:
  441. value = getattr(self, prop)
  442. print('- %s: %s' % (prop, repr(value)))
  443. print('Properties from DocumentSummaryInformation stream:')
  444. for prop in self.DOCSUM_ATTRIBS:
  445. value = getattr(self, prop)
  446. print('- %s: %s' % (prop, repr(value)))
  447. #--- OleStream ---------------------------------------------------------------
  448. class OleStream(io.BytesIO):
  449. """
  450. OLE2 Stream
  451. Returns a read-only file object which can be used to read
  452. the contents of a OLE stream (instance of the BytesIO class).
  453. To open a stream, use the openstream method in the OleFile class.
  454. This function can be used with either ordinary streams,
  455. or ministreams, depending on the offset, sectorsize, and
  456. fat table arguments.
  457. Attributes:
  458. - size: actual size of data stream, after it was opened.
  459. """
  460. # FIXME: should store the list of sects obtained by following
  461. # the fat chain, and load new sectors on demand instead of
  462. # loading it all in one go.
  463. def __init__(self, fp, sect, size, offset, sectorsize, fat, filesize, olefileio):
  464. """
  465. Constructor for OleStream class.
  466. :param fp: file object, the OLE container or the MiniFAT stream
  467. :param sect: sector index of first sector in the stream
  468. :param size: total size of the stream
  469. :param offset: offset in bytes for the first FAT or MiniFAT sector
  470. :param sectorsize: size of one sector
  471. :param fat: array/list of sector indexes (FAT or MiniFAT)
  472. :param filesize: size of OLE file (for debugging)
  473. :param olefileio: OleFileIO object containing this stream
  474. :returns: a BytesIO instance containing the OLE stream
  475. """
  476. log.debug('OleStream.__init__:')
  477. log.debug(' sect=%d (%X), size=%d, offset=%d, sectorsize=%d, len(fat)=%d, fp=%s'
  478. %(sect,sect,size,offset,sectorsize,len(fat), repr(fp)))
  479. self.ole = olefileio
  480. # this check is necessary, otherwise when attempting to open a stream
  481. # from a closed OleFile, a stream of size zero is returned without
  482. # raising an exception. (see issue #81)
  483. if self.ole.fp.closed:
  484. raise OSError('Attempting to open a stream from a closed OLE File')
  485. #[PL] To detect malformed documents with FAT loops, we compute the
  486. # expected number of sectors in the stream:
  487. unknown_size = False
  488. if size == UNKNOWN_SIZE:
  489. # this is the case when called from OleFileIO._open(), and stream
  490. # size is not known in advance (for example when reading the
  491. # Directory stream). Then we can only guess maximum size:
  492. size = len(fat)*sectorsize
  493. # and we keep a record that size was unknown:
  494. unknown_size = True
  495. log.debug(' stream with UNKNOWN SIZE')
  496. nb_sectors = (size + (sectorsize-1)) // sectorsize
  497. log.debug('nb_sectors = %d' % nb_sectors)
  498. # This number should (at least) be less than the total number of
  499. # sectors in the given FAT:
  500. if nb_sectors > len(fat):
  501. self.ole._raise_defect(DEFECT_INCORRECT, 'malformed OLE document, stream too large')
  502. # optimization(?): data is first a list of strings, and join() is called
  503. # at the end to concatenate all in one string.
  504. # (this may not be really useful with recent Python versions)
  505. data = []
  506. # if size is zero, then first sector index should be ENDOFCHAIN:
  507. if size == 0 and sect != ENDOFCHAIN:
  508. log.debug('size == 0 and sect != ENDOFCHAIN:')
  509. self.ole._raise_defect(DEFECT_INCORRECT, 'incorrect OLE sector index for empty stream')
  510. #[PL] A fixed-length for loop is used instead of an undefined while
  511. # loop to avoid DoS attacks:
  512. for i in range(nb_sectors):
  513. log.debug('Reading stream sector[%d] = %Xh' % (i, sect))
  514. # Sector index may be ENDOFCHAIN, but only if size was unknown
  515. if sect == ENDOFCHAIN:
  516. if unknown_size:
  517. log.debug('Reached ENDOFCHAIN sector for stream with unknown size')
  518. break
  519. else:
  520. # else this means that the stream is smaller than declared:
  521. log.debug('sect=ENDOFCHAIN before expected size')
  522. self.ole._raise_defect(DEFECT_INCORRECT, 'incomplete OLE stream')
  523. # sector index should be within FAT:
  524. if sect<0 or sect>=len(fat):
  525. log.debug('sect=%d (%X) / len(fat)=%d' % (sect, sect, len(fat)))
  526. log.debug('i=%d / nb_sectors=%d' %(i, nb_sectors))
  527. ## tmp_data = b"".join(data)
  528. ## f = open('test_debug.bin', 'wb')
  529. ## f.write(tmp_data)
  530. ## f.close()
  531. ## log.debug('data read so far: %d bytes' % len(tmp_data))
  532. self.ole._raise_defect(DEFECT_INCORRECT, 'incorrect OLE FAT, sector index out of range')
  533. # stop reading here if the exception is ignored:
  534. break
  535. #TODO: merge this code with OleFileIO.getsect() ?
  536. #TODO: check if this works with 4K sectors:
  537. try:
  538. fp.seek(offset + sectorsize * sect)
  539. except:
  540. log.debug('sect=%d, seek=%d, filesize=%d' %
  541. (sect, offset+sectorsize*sect, filesize))
  542. self.ole._raise_defect(DEFECT_INCORRECT, 'OLE sector index out of range')
  543. # stop reading here if the exception is ignored:
  544. break
  545. sector_data = fp.read(sectorsize)
  546. # [PL] check if there was enough data:
  547. # Note: if sector is the last of the file, sometimes it is not a
  548. # complete sector (of 512 or 4K), so we may read less than
  549. # sectorsize.
  550. if len(sector_data)!=sectorsize and sect!=(len(fat)-1):
  551. log.debug('sect=%d / len(fat)=%d, seek=%d / filesize=%d, len read=%d' %
  552. (sect, len(fat), offset+sectorsize*sect, filesize, len(sector_data)))
  553. log.debug('seek+len(read)=%d' % (offset+sectorsize*sect+len(sector_data)))
  554. self.ole._raise_defect(DEFECT_INCORRECT, 'incomplete OLE sector')
  555. data.append(sector_data)
  556. # jump to next sector in the FAT:
  557. try:
  558. sect = fat[sect] & 0xFFFFFFFF # JYTHON-WORKAROUND
  559. except IndexError:
  560. # [PL] if pointer is out of the FAT an exception is raised
  561. self.ole._raise_defect(DEFECT_INCORRECT, 'incorrect OLE FAT, sector index out of range')
  562. # stop reading here if the exception is ignored:
  563. break
  564. #[PL] Last sector should be a "end of chain" marker:
  565. # if sect != ENDOFCHAIN:
  566. # raise IOError('incorrect last sector index in OLE stream')
  567. data = b"".join(data)
  568. # Data is truncated to the actual stream size:
  569. if len(data) >= size:
  570. log.debug('Read data of length %d, truncated to stream size %d' % (len(data), size))
  571. data = data[:size]
  572. # actual stream size is stored for future use:
  573. self.size = size
  574. elif unknown_size:
  575. # actual stream size was not known, now we know the size of read
  576. # data:
  577. log.debug('Read data of length %d, the stream size was unknown' % len(data))
  578. self.size = len(data)
  579. else:
  580. # read data is less than expected:
  581. log.debug('Read data of length %d, less than expected stream size %d' % (len(data), size))
  582. # TODO: provide details in exception message
  583. self.size = len(data)
  584. self.ole._raise_defect(DEFECT_INCORRECT, 'OLE stream size is less than declared')
  585. # when all data is read in memory, BytesIO constructor is called
  586. io.BytesIO.__init__(self, data)
  587. # Then the OleStream object can be used as a read-only file object.
  588. #--- OleDirectoryEntry -------------------------------------------------------
  589. class OleDirectoryEntry:
  590. """
  591. OLE2 Directory Entry
  592. """
  593. #[PL] parsing code moved from OleFileIO.loaddirectory
  594. # struct to parse directory entries:
  595. # <: little-endian byte order, standard sizes
  596. # (note: this should guarantee that Q returns a 64 bits int)
  597. # 64s: string containing entry name in unicode UTF-16 (max 31 chars) + null char = 64 bytes
  598. # H: uint16, number of bytes used in name buffer, including null = (len+1)*2
  599. # B: uint8, dir entry type (between 0 and 5)
  600. # B: uint8, color: 0=black, 1=red
  601. # I: uint32, index of left child node in the red-black tree, NOSTREAM if none
  602. # I: uint32, index of right child node in the red-black tree, NOSTREAM if none
  603. # I: uint32, index of child root node if it is a storage, else NOSTREAM
  604. # 16s: CLSID, unique identifier (only used if it is a storage)
  605. # I: uint32, user flags
  606. # Q (was 8s): uint64, creation timestamp or zero
  607. # Q (was 8s): uint64, modification timestamp or zero
  608. # I: uint32, SID of first sector if stream or ministream, SID of 1st sector
  609. # of stream containing ministreams if root entry, 0 otherwise
  610. # I: uint32, total stream size in bytes if stream (low 32 bits), 0 otherwise
  611. # I: uint32, total stream size in bytes if stream (high 32 bits), 0 otherwise
  612. STRUCT_DIRENTRY = '<64sHBBIII16sIQQIII'
  613. # size of a directory entry: 128 bytes
  614. DIRENTRY_SIZE = 128
  615. assert struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE
    def __init__(self, entry, sid, olefile):
        """
        Constructor for an OleDirectoryEntry object.
        Parses a 128-bytes entry from the OLE Directory stream.

        :param entry  : string (must be 128 bytes long)
        :param sid    : index of this directory entry in the OLE file directory
        :param olefile: OleFileIO containing this directory entry
        """
        self.sid = sid
        # ref to olefile is stored for future use
        self.olefile = olefile
        # kids is a list of children entries, if this entry is a storage:
        # (list of OleDirectoryEntry objects)
        self.kids = []
        # kids_dict is a dictionary of children entries, indexed by their
        # name in lowercase: used to quickly find an entry, and to detect
        # duplicates
        self.kids_dict = {}
        # flag used to detect if the entry is referenced more than once in
        # directory:
        self.used = False
        # decode DirEntry
        (
            self.name_raw,   # 64s: string containing entry name in unicode UTF-16 (max 31 chars) + null char = 64 bytes
            self.namelength, # H: uint16, number of bytes used in name buffer, including null = (len+1)*2
            self.entry_type,
            self.color,
            self.sid_left,
            self.sid_right,
            self.sid_child,
            clsid,
            self.dwUserFlags,
            self.createTime,
            self.modifyTime,
            self.isectStart,
            self.sizeLow,
            self.sizeHigh
        ) = struct.unpack(OleDirectoryEntry.STRUCT_DIRENTRY, entry)
        if self.entry_type not in [STGTY_ROOT, STGTY_STORAGE, STGTY_STREAM, STGTY_EMPTY]:
            olefile._raise_defect(DEFECT_INCORRECT, 'unhandled OLE storage type')
        # only first directory entry can (and should) be root:
        if self.entry_type == STGTY_ROOT and sid != 0:
            olefile._raise_defect(DEFECT_INCORRECT, 'duplicate OLE root entry')
        if sid == 0 and self.entry_type != STGTY_ROOT:
            olefile._raise_defect(DEFECT_INCORRECT, 'incorrect OLE root entry')
        #log.debug(struct.unpack(fmt_entry, entry[:len_entry]))
        # name should be at most 31 unicode characters + null character,
        # so 64 bytes in total (31*2 + 2):
        if self.namelength > 64:
            olefile._raise_defect(DEFECT_INCORRECT, 'incorrect DirEntry name length >64 bytes')
            # if exception not raised, namelength is set to the maximum value:
            self.namelength = 64
        # only characters without ending null char are kept:
        self.name_utf16 = self.name_raw[:(self.namelength - 2)]
        #TODO: check if the name is actually followed by a null unicode character ([MS-CFB] 2.6.1)
        #TODO: check if the name does not contain forbidden characters:
        # [MS-CFB] 2.6.1: "The following characters are illegal and MUST NOT be part of the name: '/', '\', ':', '!'."
        # name is converted from UTF-16LE to the path encoding specified in the OleFileIO:
        self.name = olefile._decode_utf16_str(self.name_utf16)
        log.debug('DirEntry SID=%d: %s' % (self.sid, repr(self.name)))
        log.debug(' - type: %d' % self.entry_type)
        log.debug(' - sect: %Xh' % self.isectStart)
        log.debug(' - SID left: %d, right: %d, child: %d' % (self.sid_left,
            self.sid_right, self.sid_child))
        # sizeHigh is only used for 4K sectors, it should be zero for 512 bytes
        # sectors, BUT apparently some implementations set it as 0xFFFFFFFF, 1
        # or some other value so it cannot be raised as a defect in general:
        if olefile.sectorsize == 512:
            if self.sizeHigh != 0 and self.sizeHigh != 0xFFFFFFFF:
                log.debug('sectorsize=%d, sizeLow=%d, sizeHigh=%d (%X)' %
                    (olefile.sectorsize, self.sizeLow, self.sizeHigh, self.sizeHigh))
                olefile._raise_defect(DEFECT_UNSURE, 'incorrect OLE stream size')
            self.size = self.sizeLow
        else:
            # 4K sectors: combine the low and high 32 bits into a 64-bit size
            self.size = self.sizeLow + (long(self.sizeHigh) << 32)
        log.debug(' - size: %d (sizeLow=%d, sizeHigh=%d)' % (self.size, self.sizeLow, self.sizeHigh))
        self.clsid = _clsid(clsid)
        # a storage should have a null size, BUT some implementations such as
        # Word 8 for Mac seem to allow non-null values => Potential defect:
        if self.entry_type == STGTY_STORAGE and self.size != 0:
            olefile._raise_defect(DEFECT_POTENTIAL, 'OLE storage with size>0')
        # check if stream is not already referenced elsewhere:
        self.is_minifat = False
        if self.entry_type in (STGTY_ROOT, STGTY_STREAM) and self.size > 0:
            if self.size < olefile.minisectorcutoff \
            and self.entry_type == STGTY_STREAM: # only streams can be in MiniFAT
                # ministream object
                self.is_minifat = True
            else:
                self.is_minifat = False
            olefile._check_duplicate_stream(self.isectStart, self.is_minifat)
        # sector chain is built lazily by build_sect_chain():
        self.sect_chain = None
  708. def build_sect_chain(self, olefile):
  709. if self.sect_chain:
  710. return
  711. if self.entry_type not in (STGTY_ROOT, STGTY_STREAM) or self.size == 0:
  712. return
  713. self.sect_chain = list()
  714. if self.is_minifat and not olefile.minifat:
  715. olefile.loadminifat()
  716. next_sect = self.isectStart
  717. while next_sect != ENDOFCHAIN:
  718. self.sect_chain.append(next_sect)
  719. if self.is_minifat:
  720. next_sect = olefile.minifat[next_sect]
  721. else:
  722. next_sect = olefile.fat[next_sect]
  723. def build_storage_tree(self):
  724. """
  725. Read and build the red-black tree attached to this OleDirectoryEntry
  726. object, if it is a storage.
  727. Note that this method builds a tree of all subentries, so it should
  728. only be called for the root object once.
  729. """
  730. log.debug('build_storage_tree: SID=%d - %s - sid_child=%d'
  731. % (self.sid, repr(self.name), self.sid_child))
  732. if self.sid_child != NOSTREAM:
  733. # if child SID is not NOSTREAM, then this entry is a storage.
  734. # Let's walk through the tree of children to fill the kids list:
  735. self.append_kids(self.sid_child)
  736. # Note from OpenOffice documentation: the safest way is to
  737. # recreate the tree because some implementations may store broken
  738. # red-black trees...
  739. # in the OLE file, entries are sorted on (length, name).
  740. # for convenience, we sort them on name instead:
  741. # (see rich comparison methods in this class)
  742. self.kids.sort()
  743. def append_kids(self, child_sid):
  744. """
  745. Walk through red-black tree of children of this directory entry to add
  746. all of them to the kids list. (recursive method)
  747. :param child_sid: index of child directory entry to use, or None when called
  748. first time for the root. (only used during recursion)
  749. """
  750. log.debug('append_kids: child_sid=%d' % child_sid)
  751. #[PL] this method was added to use simple recursion instead of a complex
  752. # algorithm.
  753. # if this is not a storage or a leaf of the tree, nothing to do:
  754. if child_sid == NOSTREAM:
  755. return
  756. # check if child SID is in the proper range:
  757. if child_sid<0 or child_sid>=len(self.olefile.direntries):
  758. self.olefile._raise_defect(DEFECT_INCORRECT, 'OLE DirEntry index out of range')
  759. else:
  760. # get child direntry:
  761. child = self.olefile._load_direntry(child_sid) #direntries[child_sid]
  762. log.debug('append_kids: child_sid=%d - %s - sid_left=%d, sid_right=%d, sid_child=%d'
  763. % (child.sid, repr(child.name), child.sid_left, child.sid_right, child.sid_child))
  764. # Check if kid was not already referenced in a storage:
  765. if child.used:
  766. self.olefile._raise_defect(DEFECT_INCORRECT,
  767. 'OLE Entry referenced more than once')
  768. return
  769. child.used = True
  770. # the directory entries are organized as a red-black tree.
  771. # (cf. Wikipedia for details)
  772. # First walk through left side of the tree:
  773. self.append_kids(child.sid_left)
  774. # Check if its name is not already used (case-insensitive):
  775. name_lower = child.name.lower()
  776. if name_lower in self.kids_dict:
  777. self.olefile._raise_defect(DEFECT_INCORRECT,
  778. "Duplicate filename in OLE storage")
  779. # Then the child_sid OleDirectoryEntry object is appended to the
  780. # kids list and dictionary:
  781. self.kids.append(child)
  782. self.kids_dict[name_lower] = child
  783. # Finally walk through right side of the tree:
  784. self.append_kids(child.sid_right)
  785. # Afterwards build kid's own tree if it's also a storage:
  786. child.build_storage_tree()
  787. def __eq__(self, other):
  788. "Compare entries by name"
  789. return self.name == other.name
  790. def __lt__(self, other):
  791. "Compare entries by name"
  792. return self.name < other.name
  793. def __ne__(self, other):
  794. return not self.__eq__(other)
  795. def __le__(self, other):
  796. return self.__eq__(other) or self.__lt__(other)
  797. # Reflected __lt__() and __le__() will be used for __gt__() and __ge__()
  798. #TODO: replace by the same function as MS implementation ?
  799. # (order by name length first, then case-insensitive order)
  800. def dump(self, tab = 0):
  801. "Dump this entry, and all its subentries (for debug purposes only)"
  802. TYPES = ["(invalid)", "(storage)", "(stream)", "(lockbytes)",
  803. "(property)", "(root)"]
  804. try:
  805. type_name = TYPES[self.entry_type]
  806. except IndexError:
  807. type_name = '(UNKNOWN)'
  808. print(" "*tab + repr(self.name), type_name, end=' ')
  809. if self.entry_type in (STGTY_STREAM, STGTY_ROOT):
  810. print(self.size, "bytes", end=' ')
  811. print()
  812. if self.entry_type in (STGTY_STORAGE, STGTY_ROOT) and self.clsid:
  813. print(" "*tab + "{%s}" % self.clsid)
  814. for kid in self.kids:
  815. kid.dump(tab + 2)
  816. def getmtime(self):
  817. """
  818. Return modification time of a directory entry.
  819. :returns: None if modification time is null, a python datetime object
  820. otherwise (UTC timezone)
  821. new in version 0.26
  822. """
  823. if self.modifyTime == 0:
  824. return None
  825. return filetime2datetime(self.modifyTime)
  826. def getctime(self):
  827. """
  828. Return creation time of a directory entry.
  829. :returns: None if modification time is null, a python datetime object
  830. otherwise (UTC timezone)
  831. new in version 0.26
  832. """
  833. if self.createTime == 0:
  834. return None
  835. return filetime2datetime(self.createTime)
  836. #--- OleFileIO ----------------------------------------------------------------
class OleFileIO:
    """
    OLE container object

    This class encapsulates the interface to an OLE 2 structured
    storage file. Use the listdir and openstream methods to
    access the contents of this file.

    Object names are given as a list of strings, one for each subentry
    level. The root entry should be omitted. For example, the following
    code extracts all image streams from a Microsoft Image Composer file::

        ole = OleFileIO("fan.mic")
        for entry in ole.listdir():
            if entry[1:2] == "Image":
                fin = ole.openstream(entry)
                fout = open(entry[0:1], "wb")
                while True:
                    s = fin.read(8192)
                    if not s:
                        break
                    fout.write(s)

    You can use the viewer application provided with the Python Imaging
    Library to view the resulting files (which happens to be standard
    TIFF files).
    """
  860. def __init__(self, filename=None, raise_defects=DEFECT_FATAL,
  861. write_mode=False, debug=False, path_encoding=DEFAULT_PATH_ENCODING):
  862. """
  863. Constructor for the OleFileIO class.
  864. :param filename: file to open.
  865. - if filename is a string smaller than 1536 bytes, it is the path
  866. of the file to open. (bytes or unicode string)
  867. - if filename is a string longer than 1535 bytes, it is parsed
  868. as the content of an OLE file in memory. (bytes type only)
  869. - if filename is a file-like object (with read, seek and tell methods),
  870. it is parsed as-is.
  871. :param raise_defects: minimal level for defects to be raised as exceptions.
  872. (use DEFECT_FATAL for a typical application, DEFECT_INCORRECT for a
  873. security-oriented application, see source code for details)
  874. :param write_mode: bool, if True the file is opened in read/write mode instead
  875. of read-only by default.
  876. :param debug: bool, set debug mode (deprecated, not used anymore)
  877. :param path_encoding: None or str, name of the codec to use for path
  878. names (streams and storages), or None for Unicode.
  879. Unicode by default on Python 3+, UTF-8 on Python 2.x.
  880. (new in olefile 0.42, was hardcoded to Latin-1 until olefile v0.41)
  881. """
  882. # minimal level for defects to be raised as exceptions:
  883. self._raise_defects_level = raise_defects
  884. #: list of defects/issues not raised as exceptions:
  885. #: tuples of (exception type, message)
  886. self.parsing_issues = []
  887. self.write_mode = write_mode
  888. self.path_encoding = path_encoding
  889. # initialize all attributes to default values:
  890. self._filesize = None
  891. self.ministream = None
  892. self._used_streams_fat = []
  893. self._used_streams_minifat = []
  894. self.byte_order = None
  895. self.directory_fp = None
  896. self.direntries = None
  897. self.dll_version = None
  898. self.fat = None
  899. self.first_difat_sector = None
  900. self.first_dir_sector = None
  901. self.first_mini_fat_sector = None
  902. self.fp = None
  903. self.header_clsid = None
  904. self.header_signature = None
  905. self.metadata = None
  906. self.mini_sector_shift = None
  907. self.mini_sector_size = None
  908. self.mini_stream_cutoff_size = None
  909. self.minifat = None
  910. self.minifatsect = None
  911. # TODO: duplicates?
  912. self.minisectorcutoff = None
  913. self.minisectorsize = None
  914. self.ministream = None
  915. self.minor_version = None
  916. self.nb_sect = None
  917. self.num_difat_sectors = None
  918. self.num_dir_sectors = None
  919. self.num_fat_sectors = None
  920. self.num_mini_fat_sectors = None
  921. self.reserved1 = None
  922. self.reserved2 = None
  923. self.root = None
  924. self.sector_shift = None
  925. self.sector_size = None
  926. self.transaction_signature_number = None
  927. if filename:
  928. self.open(filename, write_mode=write_mode)
  929. def __enter__(self):
  930. return self
  931. def __exit__(self, *args):
  932. self.close()
  933. def _raise_defect(self, defect_level, message, exception_type=IOError):
  934. """
  935. This method should be called for any defect found during file parsing.
  936. It may raise an IOError exception according to the minimal level chosen
  937. for the OleFileIO object.
  938. :param defect_level: defect level, possible values are:
  939. - DEFECT_UNSURE : a case which looks weird, but not sure it's a defect
  940. - DEFECT_POTENTIAL : a potential defect
  941. - DEFECT_INCORRECT : an error according to specifications, but parsing can go on
  942. - DEFECT_FATAL : an error which cannot be ignored, parsing is impossible
  943. :param message: string describing the defect, used with raised exception.
  944. :param exception_type: exception class to be raised, IOError by default
  945. """
  946. # added by [PL]
  947. if defect_level >= self._raise_defects_level:
  948. log.error(message)
  949. raise exception_type(message)
  950. else:
  951. # just record the issue, no exception raised:
  952. self.parsing_issues.append((exception_type, message))
  953. log.warning(message)
  954. def _decode_utf16_str(self, utf16_str, errors='replace'):
  955. """
  956. Decode a string encoded in UTF-16 LE format, as found in the OLE
  957. directory or in property streams. Return a string encoded
  958. according to the path_encoding specified for the OleFileIO object.
  959. :param utf16_str: bytes string encoded in UTF-16 LE format
  960. :param errors: str, see python documentation for str.decode()
  961. :return: str, encoded according to path_encoding
  962. """
  963. unicode_str = utf16_str.decode('UTF-16LE', errors)
  964. if self.path_encoding:
  965. # an encoding has been specified for path names:
  966. return unicode_str.encode(self.path_encoding, errors)
  967. else:
  968. # path_encoding=None, return the Unicode string as-is:
  969. return unicode_str
    def open(self, filename, write_mode=False):
        """
        Open an OLE2 file in read-only or read/write mode.
        Read and parse the header, FAT and directory.

        :param filename: string-like or file-like object, OLE file to parse

            - if filename is a string smaller than 1536 bytes, it is the path
              of the file to open. (bytes or unicode string)
            - if filename is a string longer than 1535 bytes, it is parsed
              as the content of an OLE file in memory. (bytes type only)
            - if filename is a file-like object (with read, seek and tell methods),
              it is parsed as-is.

        :param write_mode: bool, if True the file is opened in read/write mode instead
            of read-only by default. (ignored if filename is not a path)
        """
        self.write_mode = write_mode
        #[PL] check if filename is a string-like or file-like object:
        # (it is better to check for a read() method)
        if hasattr(filename, 'read'):
            #TODO: also check seek and tell methods?
            # file-like object: use it directly
            self.fp = filename
        elif isinstance(filename, bytes) and len(filename) >= MINIMAL_OLEFILE_SIZE:
            # filename is a bytes string containing the OLE file to be parsed:
            # convert it to BytesIO
            self.fp = io.BytesIO(filename)
        else:
            # string-like object: filename of file on disk
            if self.write_mode:
                # open file in mode 'read with update, binary'
                # According to https://docs.python.org/2/library/functions.html#open
                # 'w' would truncate the file, 'a' may only append on some Unixes
                mode = 'r+b'
            else:
                # read-only mode by default
                mode = 'rb'
            self.fp = open(filename, mode)
        # obtain the filesize by using seek and tell, which should work on most
        # file-like objects:
        #TODO: do it above, using getsize with filename when possible?
        #TODO: fix code to fail with clear exception when filesize cannot be obtained
        filesize = 0
        self.fp.seek(0, os.SEEK_END)
        try:
            filesize = self.fp.tell()
        finally:
            # always rewind to the start of the file, even if tell() failed:
            self.fp.seek(0)
        self._filesize = filesize
        log.debug('File size: %d bytes (%Xh)' % (self._filesize, self._filesize))
        # lists of streams in FAT and MiniFAT, to detect duplicate references
        # (list of indexes of first sectors of each stream)
        self._used_streams_fat = []
        self._used_streams_minifat = []
        # the header is always the first 512 bytes, whatever the sector size:
        header = self.fp.read(512)
        if len(header) != 512 or header[:8] != MAGIC:
            log.debug('Magic = %r instead of %r' % (header[:8], MAGIC))
            self._raise_defect(DEFECT_FATAL, "not an OLE2 structured storage file")
        # [PL] header structure according to AAF specifications:
        ##Header
        ##struct StructuredStorageHeader { // [offset from start (bytes), length (bytes)]
        ##BYTE _abSig[8]; // [00H,08] {0xd0, 0xcf, 0x11, 0xe0, 0xa1, 0xb1,
        ## // 0x1a, 0xe1} for current version
        ##CLSID _clsid; // [08H,16] reserved must be zero (WriteClassStg/
        ## // GetClassFile uses root directory class id)
        ##USHORT _uMinorVersion; // [18H,02] minor version of the format: 33 is
        ## // written by reference implementation
        ##USHORT _uDllVersion; // [1AH,02] major version of the dll/format: 3 for
        ## // 512-byte sectors, 4 for 4 KB sectors
        ##USHORT _uByteOrder; // [1CH,02] 0xFFFE: indicates Intel byte-ordering
        ##USHORT _uSectorShift; // [1EH,02] size of sectors in power-of-two;
        ## // typically 9 indicating 512-byte sectors
        ##USHORT _uMiniSectorShift; // [20H,02] size of mini-sectors in power-of-two;
        ## // typically 6 indicating 64-byte mini-sectors
        ##USHORT _usReserved; // [22H,02] reserved, must be zero
        ##ULONG _ulReserved1; // [24H,04] reserved, must be zero
        ##FSINDEX _csectDir; // [28H,04] must be zero for 512-byte sectors,
        ## // number of SECTs in directory chain for 4 KB
        ## // sectors
        ##FSINDEX _csectFat; // [2CH,04] number of SECTs in the FAT chain
        ##SECT _sectDirStart; // [30H,04] first SECT in the directory chain
        ##DFSIGNATURE _signature; // [34H,04] signature used for transactions; must
        ## // be zero. The reference implementation
        ## // does not support transactions
        ##ULONG _ulMiniSectorCutoff; // [38H,04] maximum size for a mini stream;
        ## // typically 4096 bytes
        ##SECT _sectMiniFatStart; // [3CH,04] first SECT in the MiniFAT chain
        ##FSINDEX _csectMiniFat; // [40H,04] number of SECTs in the MiniFAT chain
        ##SECT _sectDifStart; // [44H,04] first SECT in the DIFAT chain
        ##FSINDEX _csectDif; // [48H,04] number of SECTs in the DIFAT chain
        ##SECT _sectFat[109]; // [4CH,436] the SECTs of first 109 FAT sectors
        ##};
        # [PL] header decoding:
        # '<' indicates little-endian byte ordering for Intel (cf. struct module help)
        fmt_header = '<8s16sHHHHHHLLLLLLLLLL'
        header_size = struct.calcsize(fmt_header)
        log.debug( "fmt_header size = %d, +FAT = %d" % (header_size, header_size + 109*4) )
        header1 = header[:header_size]
        (
            self.header_signature,
            self.header_clsid,
            self.minor_version,
            self.dll_version,
            self.byte_order,
            self.sector_shift,
            self.mini_sector_shift,
            self.reserved1,
            self.reserved2,
            self.num_dir_sectors,
            self.num_fat_sectors,
            self.first_dir_sector,
            self.transaction_signature_number,
            self.mini_stream_cutoff_size,
            self.first_mini_fat_sector,
            self.num_mini_fat_sectors,
            self.first_difat_sector,
            self.num_difat_sectors
        ) = struct.unpack(fmt_header, header1)
        log.debug( struct.unpack(fmt_header, header1))
        if self.header_signature != MAGIC:
            # OLE signature should always be present
            self._raise_defect(DEFECT_FATAL, "incorrect OLE signature")
        if self.header_clsid != bytearray(16):
            # according to AAF specs, CLSID should always be zero
            self._raise_defect(DEFECT_INCORRECT, "incorrect CLSID in OLE header")
        log.debug( "Minor Version = %d" % self.minor_version )
        # TODO: according to MS-CFB, minor version should be 0x003E
        log.debug( "DLL Version = %d (expected: 3 or 4)" % self.dll_version )
        if self.dll_version not in [3, 4]:
            # version 3: usual format, 512 bytes per sector
            # version 4: large format, 4K per sector
            self._raise_defect(DEFECT_INCORRECT, "incorrect DllVersion in OLE header")
        log.debug( "Byte Order = %X (expected: FFFE)" % self.byte_order )
        if self.byte_order != 0xFFFE:
            # For now only common little-endian documents are handled correctly
            self._raise_defect(DEFECT_INCORRECT, "incorrect ByteOrder in OLE header")
            # TODO: add big-endian support for documents created on Mac ?
            # But according to [MS-CFB] ? v20140502, ByteOrder MUST be 0xFFFE.
        self.sector_size = 2**self.sector_shift
        log.debug( "Sector Size = %d bytes (expected: 512 or 4096)" % self.sector_size )
        if self.sector_size not in [512, 4096]:
            self._raise_defect(DEFECT_INCORRECT, "incorrect sector_size in OLE header")
        if (self.dll_version==3 and self.sector_size!=512) \
        or (self.dll_version==4 and self.sector_size!=4096):
            self._raise_defect(DEFECT_INCORRECT, "sector_size does not match DllVersion in OLE header")
        self.mini_sector_size = 2**self.mini_sector_shift
        log.debug( "MiniFAT Sector Size = %d bytes (expected: 64)" % self.mini_sector_size )
        if self.mini_sector_size not in [64]:
            self._raise_defect(DEFECT_INCORRECT, "incorrect mini_sector_size in OLE header")
        if self.reserved1 != 0 or self.reserved2 != 0:
            self._raise_defect(DEFECT_INCORRECT, "incorrect OLE header (non-null reserved bytes)")
        log.debug( "Number of Directory sectors = %d" % self.num_dir_sectors )
        # Number of directory sectors (only allowed if DllVersion != 3)
        if self.sector_size==512 and self.num_dir_sectors!=0:
            self._raise_defect(DEFECT_INCORRECT, "incorrect number of directory sectors in OLE header")
        log.debug( "Number of FAT sectors = %d" % self.num_fat_sectors )
        # num_fat_sectors = number of FAT sectors in the file
        log.debug( "First Directory sector = %Xh" % self.first_dir_sector )
        # first_dir_sector = 1st sector containing the directory
        log.debug( "Transaction Signature Number = %d" % self.transaction_signature_number )
        # Signature should be zero, BUT some implementations do not follow this
        # rule => only a potential defect:
        # (according to MS-CFB, may be != 0 for applications supporting file
        # transactions)
        if self.transaction_signature_number != 0:
            self._raise_defect(DEFECT_POTENTIAL, "incorrect OLE header (transaction_signature_number>0)")
        log.debug( "Mini Stream cutoff size = %Xh (expected: 1000h)" % self.mini_stream_cutoff_size )
        # MS-CFB: This integer field MUST be set to 0x00001000. This field
        # specifies the maximum size of a user-defined data stream allocated
        # from the mini FAT and mini stream, and that cutoff is 4096 bytes.
        # Any user-defined data stream larger than or equal to this cutoff size
        # must be allocated as normal sectors from the FAT.
        if self.mini_stream_cutoff_size != 0x1000:
            self._raise_defect(DEFECT_INCORRECT, "incorrect mini_stream_cutoff_size in OLE header")
            # if no exception is raised, the cutoff size is fixed to 0x1000
            log.warning('Fixing the mini_stream_cutoff_size to 4096 (mandatory value) instead of %d' %
                self.mini_stream_cutoff_size)
            self.mini_stream_cutoff_size = 0x1000
        # TODO: check if these values are OK
        log.debug( "First MiniFAT sector = %Xh" % self.first_mini_fat_sector )
        log.debug( "Number of MiniFAT sectors = %d" % self.num_mini_fat_sectors )
        log.debug( "First DIFAT sector = %Xh" % self.first_difat_sector )
        log.debug( "Number of DIFAT sectors = %d" % self.num_difat_sectors )
        # calculate the number of sectors in the file
        # (-1 because header doesn't count)
        self.nb_sect = ( (filesize + self.sector_size-1) // self.sector_size) - 1
        log.debug( "Maximum number of sectors in the file: %d (%Xh)" % (self.nb_sect, self.nb_sect))
        #TODO: change this test, because an OLE file MAY contain other data
        # after the last sector.
        # file clsid
        self.header_clsid = _clsid(header[8:24])
        #TODO: remove redundant attributes, and fix the code which uses them?
        self.sectorsize = self.sector_size #1 << i16(header, 30)
        self.minisectorsize = self.mini_sector_size #1 << i16(header, 32)
        self.minisectorcutoff = self.mini_stream_cutoff_size # i32(header, 56)
        # check known streams for duplicate references (these are always in FAT,
        # never in MiniFAT):
        self._check_duplicate_stream(self.first_dir_sector)
        # check MiniFAT only if it is not empty:
        if self.num_mini_fat_sectors:
            self._check_duplicate_stream(self.first_mini_fat_sector)
        # check DIFAT only if it is not empty:
        if self.num_difat_sectors:
            self._check_duplicate_stream(self.first_difat_sector)
        # Load file allocation tables
        self.loadfat(header)
        # Load directory. This sets both the direntries list (ordered by sid)
        # and the root (ordered by hierarchy) members.
        self.loaddirectory(self.first_dir_sector)
        self.minifatsect = self.first_mini_fat_sector
    def close(self):
        """
        close the OLE file, to release the file object

        (closes the file object stored in self.fp, whether it was given by the
        caller or created by open(); the OleFileIO should not be used afterwards)
        """
        self.fp.close()
  1183. def _check_duplicate_stream(self, first_sect, minifat=False):
  1184. """
  1185. Checks if a stream has not been already referenced elsewhere.
  1186. This method should only be called once for each known stream, and only
  1187. if stream size is not null.
  1188. :param first_sect: int, index of first sector of the stream in FAT
  1189. :param minifat: bool, if True, stream is located in the MiniFAT, else in the FAT
  1190. """
  1191. if minifat:
  1192. log.debug('_check_duplicate_stream: sect=%Xh in MiniFAT' % first_sect)
  1193. used_streams = self._used_streams_minifat
  1194. else:
  1195. log.debug('_check_duplicate_stream: sect=%Xh in FAT' % first_sect)
  1196. # some values can be safely ignored (not a real stream):
  1197. if first_sect in (DIFSECT,FATSECT,ENDOFCHAIN,FREESECT):
  1198. return
  1199. used_streams = self._used_streams_fat
  1200. #TODO: would it be more efficient using a dict or hash values, instead
  1201. # of a list of long ?
  1202. if first_sect in used_streams:
  1203. self._raise_defect(DEFECT_INCORRECT, 'Stream referenced twice')
  1204. else:
  1205. used_streams.append(first_sect)
  1206. def dumpfat(self, fat, firstindex=0):
  1207. """
  1208. Display a part of FAT in human-readable form for debugging purposes
  1209. """
  1210. # dictionary to convert special FAT values in human-readable strings
  1211. VPL = 8 # values per line (8+1 * 8+1 = 81)
  1212. fatnames = {
  1213. FREESECT: "..free..",
  1214. ENDOFCHAIN: "[ END. ]",
  1215. FATSECT: "FATSECT ",
  1216. DIFSECT: "DIFSECT "
  1217. }
  1218. nbsect = len(fat)
  1219. nlines = (nbsect+VPL-1)//VPL
  1220. print("index", end=" ")
  1221. for i in range(VPL):
  1222. print("%8X" % i, end=" ")
  1223. print()
  1224. for l in range(nlines):
  1225. index = l*VPL
  1226. print("%6X:" % (firstindex+index), end=" ")
  1227. for i in range(index, index+VPL):
  1228. if i>=nbsect:
  1229. break
  1230. sect = fat[i]
  1231. aux = sect & 0xFFFFFFFF # JYTHON-WORKAROUND
  1232. if aux in fatnames:
  1233. name = fatnames[aux]
  1234. else:
  1235. if sect == i+1:
  1236. name = " --->"
  1237. else:
  1238. name = "%8X" % sect
  1239. print(name, end=" ")
  1240. print()
  1241. def dumpsect(self, sector, firstindex=0):
  1242. """
  1243. Display a sector in a human-readable form, for debugging purposes
  1244. """
  1245. VPL=8 # number of values per line (8+1 * 8+1 = 81)
  1246. tab = array.array(UINT32, sector)
  1247. if sys.byteorder == 'big':
  1248. tab.byteswap()
  1249. nbsect = len(tab)
  1250. nlines = (nbsect+VPL-1)//VPL
  1251. print("index", end=" ")
  1252. for i in range(VPL):
  1253. print("%8X" % i, end=" ")
  1254. print()
  1255. for l in range(nlines):
  1256. index = l*VPL
  1257. print("%6X:" % (firstindex+index), end=" ")
  1258. for i in range(index, index+VPL):
  1259. if i>=nbsect:
  1260. break
  1261. sect = tab[i]
  1262. name = "%8X" % sect
  1263. print(name, end=" ")
  1264. print()
  1265. def sect2array(self, sect):
  1266. """
  1267. convert a sector to an array of 32 bits unsigned integers,
  1268. swapping bytes on big endian CPUs such as PowerPC (old Macs)
  1269. """
  1270. a = array.array(UINT32, sect)
  1271. # if CPU is big endian, swap bytes:
  1272. if sys.byteorder == 'big':
  1273. a.byteswap()
  1274. return a
    def loadfat_sect(self, sect):
        """
        Adds the indexes of the given sector to the FAT.

        :param sect: string containing the first FAT sector, or array of long integers
        :returns: index of last FAT sector.
        """
        # a FAT sector is an array of ulong integers.
        if isinstance(sect, array.array):
            # if sect is already an array it is directly used
            fat1 = sect
        else:
            # if it's a raw sector, it is parsed in an array
            fat1 = self.sect2array(sect)
            # Display the sector contents only if the logging level is debug:
            if log.isEnabledFor(logging.DEBUG):
                self.dumpsect(sect)
        # The FAT is a sector chain starting at the first index of itself.
        # initialize isect, just in case the loop body never runs:
        isect = None
        for isect in fat1:
            isect = isect & 0xFFFFFFFF  # JYTHON-WORKAROUND
            log.debug("isect = %X" % isect)
            if isect == ENDOFCHAIN or isect == FREESECT:
                # the end of the sector chain has been reached
                log.debug("found end of sector chain")
                break
            # read the FAT sector
            s = self.getsect(isect)
            # parse it as an array of 32 bits integers, and add it to the
            # global FAT array (self.fat grows by one sector's worth of
            # entries per iteration):
            nextfat = self.sect2array(s)
            self.fat = self.fat + nextfat
        return isect
    def loadfat(self, header):
        """
        Load the FAT table, reading the 109 header entries first and then
        following the DIFAT chain if the file is large enough to need one.

        :param header: bytes, the 512-byte OLE header read from the file
        """
        # The 1st sector of the file contains sector numbers for the first 109
        # FAT sectors, right after the header which is 76 bytes long.
        # (always 109, whatever the sector size: 512 bytes = 76+4*109)
        # Additional sectors are described by DIF blocks
        log.debug('Loading the FAT table, starting with the 1st sector after the header')
        sect = header[76:512]
        log.debug( "len(sect)=%d, so %d integers" % (len(sect), len(sect)//4) )
        # [PL] FAT is an array of 32 bits unsigned ints, it's more effective
        # to use an array than a list in Python.
        # It's initialized as empty first:
        self.fat = array.array(UINT32)
        self.loadfat_sect(sect)
        if self.num_difat_sectors != 0:
            log.debug('DIFAT is used, because file size > 6.8MB.')
            # [PL] There's a DIFAT because file is larger than 6.8MB
            # some checks just in case:
            if self.num_fat_sectors <= 109:
                # there must be at least 109 blocks in header and the rest in
                # DIFAT, so number of sectors must be >109.
                self._raise_defect(DEFECT_INCORRECT, 'incorrect DIFAT, not enough sectors')
            if self.first_difat_sector >= self.nb_sect:
                # initial DIFAT block index must be valid
                self._raise_defect(DEFECT_FATAL, 'incorrect DIFAT, first index out of range')
            log.debug( "DIFAT analysis..." )
            # We compute the necessary number of DIFAT sectors :
            # Number of pointers per DIFAT sector = (sectorsize/4)-1
            # (-1 because the last pointer is the next DIFAT sector number)
            nb_difat_sectors = (self.sectorsize//4)-1
            # (if 512 bytes: each DIFAT sector = 127 pointers + 1 towards next DIFAT sector)
            nb_difat = (self.num_fat_sectors-109 + nb_difat_sectors-1)//nb_difat_sectors
            log.debug( "nb_difat = %d" % nb_difat )
            if self.num_difat_sectors != nb_difat:
                raise IOError('incorrect DIFAT')
            isect_difat = self.first_difat_sector
            for i in iterrange(nb_difat):
                log.debug( "DIFAT block %d, sector %X" % (i, isect_difat) )
                #TODO: check if corresponding FAT SID = DIFSECT
                sector_difat = self.getsect(isect_difat)
                difat = self.sect2array(sector_difat)
                # Display the sector contents only if the logging level is debug:
                if log.isEnabledFor(logging.DEBUG):
                    self.dumpsect(sector_difat)
                # all but the last entry of the DIFAT sector point to FAT sectors:
                self.loadfat_sect(difat[:nb_difat_sectors])
                # last DIFAT pointer is next DIFAT sector:
                isect_difat = difat[nb_difat_sectors]
                log.debug( "next DIFAT sector: %X" % isect_difat )
            # checks:
            if isect_difat not in [ENDOFCHAIN, FREESECT]:
                # last DIFAT pointer value must be ENDOFCHAIN or FREESECT
                raise IOError('incorrect end of DIFAT')
        else:
            log.debug('No DIFAT, because file size < 6.8MB.')
        # since FAT is read from fixed-size sectors, it may contain more values
        # than the actual number of sectors in the file.
        # Keep only the relevant sector indexes:
        if len(self.fat) > self.nb_sect:
            log.debug('len(fat)=%d, shrunk to nb_sect=%d' % (len(self.fat), self.nb_sect))
            self.fat = self.fat[:self.nb_sect]
        log.debug('FAT references %d sectors / Maximum %d sectors in file' % (len(self.fat), self.nb_sect))
        # Display the FAT contents only if the logging level is debug:
        if log.isEnabledFor(logging.DEBUG):
            log.debug('\nFAT:')
            self.dumpfat(self.fat)
    def loadminifat(self):
        """
        Load the MiniFAT table, stored in a standard sub-stream pointed to
        by a header field, and shrink it to the number of mini sectors
        actually used by the MiniStream.
        """
        # NOTE: there are two sizes to take into account for this stream:
        # 1) Stream size is calculated according to the number of sectors
        # declared in the OLE header. This allocated stream may be more than
        # needed to store the actual sector indexes.
        # (self.num_mini_fat_sectors is the number of sectors of size self.sector_size)
        stream_size = self.num_mini_fat_sectors * self.sector_size
        # 2) Actually used size is calculated by dividing the MiniStream size
        # (given by root entry size) by the size of mini sectors, *4 for
        # 32 bits indexes:
        nb_minisectors = (self.root.size + self.mini_sector_size-1) // self.mini_sector_size
        used_size = nb_minisectors * 4
        log.debug('loadminifat(): minifatsect=%d, nb FAT sectors=%d, used_size=%d, stream_size=%d, nb MiniSectors=%d' %
            (self.minifatsect, self.num_mini_fat_sectors, used_size, stream_size, nb_minisectors))
        if used_size > stream_size:
            # This is not really a problem, but may indicate a wrong implementation:
            self._raise_defect(DEFECT_INCORRECT, 'OLE MiniStream is larger than MiniFAT')
        # In any case, first read stream_size:
        s = self._open(self.minifatsect, stream_size, force_FAT=True).read()
        # parse the raw stream as an array of 32 bits unsigned integers:
        self.minifat = self.sect2array(s)
        # Then shrink the array to used size, to avoid indexes out of MiniStream:
        log.debug('MiniFAT shrunk from %d to %d sectors' % (len(self.minifat), nb_minisectors))
        self.minifat = self.minifat[:nb_minisectors]
        log.debug('loadminifat(): len=%d' % len(self.minifat))
        # Display the FAT contents only if the logging level is debug:
        if log.isEnabledFor(logging.DEBUG):
            log.debug('\nMiniFAT:')
            self.dumpfat(self.minifat)
  1424. def getsect(self, sect):
  1425. """
  1426. Read given sector from file on disk.
  1427. :param sect: int, sector index
  1428. :returns: a string containing the sector data.
  1429. """
  1430. # From [MS-CFB]: A sector number can be converted into a byte offset
  1431. # into the file by using the following formula:
  1432. # (sector number + 1) x Sector Size.
  1433. # This implies that sector #0 of the file begins at byte offset Sector
  1434. # Size, not at 0.
  1435. # [PL] the original code in PIL was wrong when sectors are 4KB instead of
  1436. # 512 bytes:
  1437. #self.fp.seek(512 + self.sectorsize * sect)
  1438. #[PL]: added safety checks:
  1439. #print("getsect(%X)" % sect)
  1440. try:
  1441. self.fp.seek(self.sectorsize * (sect+1))
  1442. except:
  1443. log.debug('getsect(): sect=%X, seek=%d, filesize=%d' %
  1444. (sect, self.sectorsize*(sect+1), self._filesize))
  1445. self._raise_defect(DEFECT_FATAL, 'OLE sector index out of range')
  1446. sector = self.fp.read(self.sectorsize)
  1447. if len(sector) != self.sectorsize:
  1448. log.debug('getsect(): sect=%X, read=%d, sectorsize=%d' %
  1449. (sect, len(sector), self.sectorsize))
  1450. self._raise_defect(DEFECT_FATAL, 'incomplete OLE sector')
  1451. return sector
  1452. def write_sect(self, sect, data, padding=b'\x00'):
  1453. """
  1454. Write given sector to file on disk.
  1455. :param sect: int, sector index
  1456. :param data: bytes, sector data
  1457. :param padding: single byte, padding character if data < sector size
  1458. """
  1459. if not isinstance(data, bytes):
  1460. raise TypeError("write_sect: data must be a bytes string")
  1461. if not isinstance(padding, bytes) or len(padding)!=1:
  1462. raise TypeError("write_sect: padding must be a bytes string of 1 char")
  1463. #TODO: we could allow padding=None for no padding at all
  1464. try:
  1465. self.fp.seek(self.sectorsize * (sect+1))
  1466. except:
  1467. log.debug('write_sect(): sect=%X, seek=%d, filesize=%d' %
  1468. (sect, self.sectorsize*(sect+1), self._filesize))
  1469. self._raise_defect(DEFECT_FATAL, 'OLE sector index out of range')
  1470. if len(data) < self.sectorsize:
  1471. # add padding
  1472. data += padding * (self.sectorsize - len(data))
  1473. elif len(data) < self.sectorsize:
  1474. raise ValueError("Data is larger than sector size")
  1475. self.fp.write(data)
  1476. def _write_mini_sect(self, fp_pos, data, padding = b'\x00'):
  1477. """
  1478. Write given sector to file on disk.
  1479. :param fp_pos: int, file position
  1480. :param data: bytes, sector data
  1481. :param padding: single byte, padding character if data < sector size
  1482. """
  1483. if not isinstance(data, bytes):
  1484. raise TypeError("write_mini_sect: data must be a bytes string")
  1485. if not isinstance(padding, bytes) or len(padding) != 1:
  1486. raise TypeError("write_mini_sect: padding must be a bytes string of 1 char")
  1487. try:
  1488. self.fp.seek(fp_pos)
  1489. except:
  1490. log.debug('write_mini_sect(): fp_pos=%d, filesize=%d' %
  1491. (fp_pos, self._filesize))
  1492. self._raise_defect(DEFECT_FATAL, 'OLE sector index out of range')
  1493. len_data = len(data)
  1494. if len_data < self.mini_sector_size:
  1495. data += padding * (self.mini_sector_size - len_data)
  1496. if self.mini_sector_size < len_data:
  1497. raise ValueError("Data is larger than sector size")
  1498. self.fp.write(data)
    def loaddirectory(self, sect):
        """
        Load the directory.

        :param sect: sector index of directory stream.
        """
        log.debug('Loading the Directory:')
        # The directory is stored in a standard
        # substream, independent of its size.
        # open directory stream as a read-only file:
        # (stream size is not known in advance)
        self.directory_fp = self._open(sect, force_FAT=True)
        #[PL] to detect malformed documents and avoid DoS attacks, the maximum
        # number of directory entries can be calculated (each entry is 128 bytes):
        max_entries = self.directory_fp.size // 128
        log.debug('loaddirectory: size=%d, max_entries=%d' %
            (self.directory_fp.size, max_entries))
        # Create list of directory entries:
        # We start with a list of "None" objects, filled lazily by
        # _load_direntry as entries are referenced.
        self.direntries = [None] * max_entries
        # load root entry:
        root_entry = self._load_direntry(0)
        # Root entry is the first entry:
        self.root = self.direntries[0]
        # TODO: read ALL directory entries (ignore bad entries?)
        # TODO: adapt build_storage_tree to avoid duplicate reads
        # read and build all storage trees, starting from the root:
        self.root.build_storage_tree()
    def _load_direntry (self, sid):
        """
        Load a directory entry from the directory.
        This method should only be called once for each storage/stream when
        loading the directory.

        :param sid: index of storage/stream in the directory.
        :returns: a OleDirectoryEntry object
        :exception IOError: if the entry has already been referenced.
        """
        # check if SID is OK:
        if sid<0 or sid>=len(self.direntries):
            self._raise_defect(DEFECT_FATAL, "OLE directory index out of range")
        # check if entry was already referenced:
        if self.direntries[sid] is not None:
            self._raise_defect(DEFECT_INCORRECT,
                "double reference for OLE stream/storage")
            # if exception not raised, return the object
            # (NOTE: _raise_defect only raises when the defect level reaches
            # the minimum configured on this OleFileIO; otherwise the
            # already-loaded entry is returned instead of being parsed twice)
            return self.direntries[sid]
        # each directory entry is 128 bytes:
        self.directory_fp.seek(sid * 128)
        entry = self.directory_fp.read(128)
        self.direntries[sid] = OleDirectoryEntry(entry, sid, self)
        return self.direntries[sid]
  1556. def dumpdirectory(self):
  1557. """
  1558. Dump directory (for debugging only)
  1559. """
  1560. self.root.dump()
    def _open(self, start, size = UNKNOWN_SIZE, force_FAT=False):
        """
        Open a stream, either in FAT or MiniFAT according to its size.
        (openstream helper)

        :param start: index of first sector
        :param size: size of stream (or nothing if size is unknown)
        :param force_FAT: if False (default), stream will be opened in FAT or MiniFAT
            according to size. If True, it will always be opened in FAT.
        """
        log.debug('OleFileIO.open(): sect=%Xh, size=%d, force_FAT=%s' %
            (start, size, str(force_FAT)))
        # stream size is compared to the mini_stream_cutoff_size threshold:
        if size < self.minisectorcutoff and not force_FAT:
            # ministream object
            if not self.ministream:
                # load MiniFAT if it wasn't already done:
                self.loadminifat()
                # The first sector index of the miniFAT stream is stored in the
                # root directory entry:
                size_ministream = self.root.size
                log.debug('Opening MiniStream: sect=%Xh, size=%d' %
                    (self.root.isectStart, size_ministream))
                # the MiniStream itself is opened as a regular FAT stream and
                # cached on the instance for subsequent small-stream opens:
                self.ministream = self._open(self.root.isectStart,
                    size_ministream, force_FAT=True)
            return OleStream(fp=self.ministream, sect=start, size=size,
                             offset=0, sectorsize=self.minisectorsize,
                             fat=self.minifat, filesize=self.ministream.size,
                             olefileio=self)
        else:
            # standard stream
            return OleStream(fp=self.fp, sect=start, size=size,
                             offset=self.sectorsize,
                             sectorsize=self.sectorsize, fat=self.fat,
                             filesize=self._filesize,
                             olefileio=self)
  1596. def _list(self, files, prefix, node, streams=True, storages=False):
  1597. """
  1598. listdir helper
  1599. :param files: list of files to fill in
  1600. :param prefix: current location in storage tree (list of names)
  1601. :param node: current node (OleDirectoryEntry object)
  1602. :param streams: bool, include streams if True (True by default) - new in v0.26
  1603. :param storages: bool, include storages if True (False by default) - new in v0.26
  1604. (note: the root storage is never included)
  1605. """
  1606. prefix = prefix + [node.name]
  1607. for entry in node.kids:
  1608. if entry.entry_type == STGTY_STORAGE:
  1609. # this is a storage
  1610. if storages:
  1611. # add it to the list
  1612. files.append(prefix[1:] + [entry.name])
  1613. # check its kids
  1614. self._list(files, prefix, entry, streams, storages)
  1615. elif entry.entry_type == STGTY_STREAM:
  1616. # this is a stream
  1617. if streams:
  1618. # add it to the list
  1619. files.append(prefix[1:] + [entry.name])
  1620. else:
  1621. self._raise_defect(DEFECT_INCORRECT, 'The directory tree contains an entry which is not a stream nor a storage.')
  1622. def listdir(self, streams=True, storages=False):
  1623. """
  1624. Return a list of streams and/or storages stored in this file
  1625. :param streams: bool, include streams if True (True by default) - new in v0.26
  1626. :param storages: bool, include storages if True (False by default) - new in v0.26
  1627. (note: the root storage is never included)
  1628. :returns: list of stream and/or storage paths
  1629. """
  1630. files = []
  1631. self._list(files, [], self.root, streams, storages)
  1632. return files
  1633. def _find(self, filename):
  1634. """
  1635. Returns directory entry of given filename. (openstream helper)
  1636. Note: this method is case-insensitive.
  1637. :param filename: path of stream in storage tree (except root entry), either:
  1638. - a string using Unix path syntax, for example:
  1639. 'storage_1/storage_1.2/stream'
  1640. - or a list of storage filenames, path to the desired stream/storage.
  1641. Example: ['storage_1', 'storage_1.2', 'stream']
  1642. :returns: sid of requested filename
  1643. :exception IOError: if file not found
  1644. """
  1645. # if filename is a string instead of a list, split it on slashes to
  1646. # convert to a list:
  1647. if isinstance(filename, basestring):
  1648. filename = filename.split('/')
  1649. # walk across storage tree, following given path:
  1650. node = self.root
  1651. for name in filename:
  1652. for kid in node.kids:
  1653. if kid.name.lower() == name.lower():
  1654. break
  1655. else:
  1656. raise IOError("file not found")
  1657. node = kid
  1658. return node.sid
  1659. def openstream(self, filename):
  1660. """
  1661. Open a stream as a read-only file object (BytesIO).
  1662. Note: filename is case-insensitive.
  1663. :param filename: path of stream in storage tree (except root entry), either:
  1664. - a string using Unix path syntax, for example:
  1665. 'storage_1/storage_1.2/stream'
  1666. - or a list of storage filenames, path to the desired stream/storage.
  1667. Example: ['storage_1', 'storage_1.2', 'stream']
  1668. :returns: file object (read-only)
  1669. :exception IOError: if filename not found, or if this is not a stream.
  1670. """
  1671. sid = self._find(filename)
  1672. entry = self.direntries[sid]
  1673. if entry.entry_type != STGTY_STREAM:
  1674. raise IOError("this file is not a stream")
  1675. return self._open(entry.isectStart, entry.size)
  1676. def _write_mini_stream(self, entry, data_to_write):
  1677. if not entry.sect_chain:
  1678. entry.build_sect_chain(self)
  1679. nb_sectors = len(entry.sect_chain)
  1680. if not self.root.sect_chain:
  1681. self.root.build_sect_chain(self)
  1682. block_size = self.sector_size // self.mini_sector_size
  1683. for idx, sect in enumerate(entry.sect_chain):
  1684. sect_base = sect // block_size
  1685. sect_offset = sect % block_size
  1686. fp_pos = (self.root.sect_chain[sect_base] + 1)*self.sector_size + sect_offset*self.mini_sector_size
  1687. if idx < (nb_sectors - 1):
  1688. data_per_sector = data_to_write[idx * self.mini_sector_size: (idx + 1) * self.mini_sector_size]
  1689. else:
  1690. data_per_sector = data_to_write[idx * self.mini_sector_size:]
  1691. self._write_mini_sect(fp_pos, data_per_sector)
    def write_stream(self, stream_name, data):
        """
        Write a stream to disk. For now, it is only possible to replace an
        existing stream by data of the same size.

        :param stream_name: path of stream in storage tree (except root entry), either:
            - a string using Unix path syntax, for example:
              'storage_1/storage_1.2/stream'
            - or a list of storage filenames, path to the desired stream/storage.
              Example: ['storage_1', 'storage_1.2', 'stream']
        :param data: bytes, data to be written, must be the same size as the original
            stream.
        """
        if not isinstance(data, bytes):
            raise TypeError("write_stream: data must be a bytes string")
        sid = self._find(stream_name)
        entry = self.direntries[sid]
        if entry.entry_type != STGTY_STREAM:
            raise IOError("this is not a stream")
        size = entry.size
        if size != len(data):
            raise ValueError("write_stream: data must be the same size as the existing stream")
        if size < self.minisectorcutoff and entry.entry_type != STGTY_ROOT:
            # small streams live in the MiniStream and are handled separately:
            return self._write_mini_stream(entry = entry, data_to_write = data)
        sect = entry.isectStart
        # number of sectors to write
        nb_sectors = (size + (self.sectorsize-1)) // self.sectorsize
        log.debug('nb_sectors = %d' % nb_sectors)
        for i in range(nb_sectors):
            # extract one sector from data, the last one being smaller:
            if i<(nb_sectors-1):
                data_sector = data [i*self.sectorsize : (i+1)*self.sectorsize]
                #TODO: comment this if it works
                assert(len(data_sector)==self.sectorsize)
            else:
                data_sector = data [i*self.sectorsize:]
                #TODO: comment this if it works
                log.debug('write_stream: size=%d sectorsize=%d data_sector=%Xh size%%sectorsize=%d'
                    % (size, self.sectorsize, len(data_sector), size % self.sectorsize))
                assert(len(data_sector) % self.sectorsize==size % self.sectorsize)
            self.write_sect(sect, data_sector)
            # jump to next sector in the FAT:
            try:
                sect = self.fat[sect]
            except IndexError:
                # [PL] if pointer is out of the FAT an exception is raised
                raise IOError('incorrect OLE FAT, sector index out of range')
        #[PL] Last sector should be a "end of chain" marker:
        if sect != ENDOFCHAIN:
            raise IOError('incorrect last sector index in OLE stream')
  1748. def get_type(self, filename):
  1749. """
  1750. Test if given filename exists as a stream or a storage in the OLE
  1751. container, and return its type.
  1752. :param filename: path of stream in storage tree. (see openstream for syntax)
  1753. :returns: False if object does not exist, its entry type (>0) otherwise:
  1754. - STGTY_STREAM: a stream
  1755. - STGTY_STORAGE: a storage
  1756. - STGTY_ROOT: the root entry
  1757. """
  1758. try:
  1759. sid = self._find(filename)
  1760. entry = self.direntries[sid]
  1761. return entry.entry_type
  1762. except:
  1763. return False
  1764. def getclsid(self, filename):
  1765. """
  1766. Return clsid of a stream/storage.
  1767. :param filename: path of stream/storage in storage tree. (see openstream for
  1768. syntax)
  1769. :returns: Empty string if clsid is null, a printable representation of the clsid otherwise
  1770. new in version 0.44
  1771. """
  1772. sid = self._find(filename)
  1773. entry = self.direntries[sid]
  1774. return entry.clsid
  1775. def getmtime(self, filename):
  1776. """
  1777. Return modification time of a stream/storage.
  1778. :param filename: path of stream/storage in storage tree. (see openstream for
  1779. syntax)
  1780. :returns: None if modification time is null, a python datetime object
  1781. otherwise (UTC timezone)
  1782. new in version 0.26
  1783. """
  1784. sid = self._find(filename)
  1785. entry = self.direntries[sid]
  1786. return entry.getmtime()
  1787. def getctime(self, filename):
  1788. """
  1789. Return creation time of a stream/storage.
  1790. :param filename: path of stream/storage in storage tree. (see openstream for
  1791. syntax)
  1792. :returns: None if creation time is null, a python datetime object
  1793. otherwise (UTC timezone)
  1794. new in version 0.26
  1795. """
  1796. sid = self._find(filename)
  1797. entry = self.direntries[sid]
  1798. return entry.getctime()
  1799. def exists(self, filename):
  1800. """
  1801. Test if given filename exists as a stream or a storage in the OLE
  1802. container.
  1803. Note: filename is case-insensitive.
  1804. :param filename: path of stream in storage tree. (see openstream for syntax)
  1805. :returns: True if object exist, else False.
  1806. """
  1807. try:
  1808. sid = self._find(filename)
  1809. return True
  1810. except:
  1811. return False
  1812. def get_size(self, filename):
  1813. """
  1814. Return size of a stream in the OLE container, in bytes.
  1815. :param filename: path of stream in storage tree (see openstream for syntax)
  1816. :returns: size in bytes (long integer)
  1817. :exception IOError: if file not found
  1818. :exception TypeError: if this is not a stream.
  1819. """
  1820. sid = self._find(filename)
  1821. entry = self.direntries[sid]
  1822. if entry.entry_type != STGTY_STREAM:
  1823. #TODO: Should it return zero instead of raising an exception ?
  1824. raise TypeError('object is not an OLE stream')
  1825. return entry.size
  1826. def get_rootentry_name(self):
  1827. """
  1828. Return root entry name. Should usually be 'Root Entry' or 'R' in most
  1829. implementations.
  1830. """
  1831. return self.root.name
    def getproperties(self, filename, convert_time=False, no_conversion=None):
        """
        Return properties described in substream.

        :param filename: path of stream in storage tree (see openstream for syntax)
        :param convert_time: bool, if True timestamps will be converted to Python datetime
        :param no_conversion: None or list of int, timestamps not to be converted
            (for example total editing time is not a real timestamp)
        :returns: a dictionary of values indexed by id (integer)
        """
        #REFERENCE: [MS-OLEPS] https://msdn.microsoft.com/en-us/library/dd942421.aspx
        # make sure no_conversion is a list, just to simplify code below:
        if no_conversion == None:
            no_conversion = []
        # stream path as a string to report exceptions:
        streampath = filename
        if not isinstance(streampath, str):
            streampath = '/'.join(streampath)
        fp = self.openstream(filename)
        data = {}
        try:
            # header
            s = fp.read(28)
            clsid = _clsid(s[8:24])
            # format id
            s = fp.read(20)
            fmtid = _clsid(s[:16])
            fp.seek(i32(s, 16))
            # get section: the section size prefix includes itself, hence the
            # "****" placeholder so that offsets stay section-relative:
            s = b"****" + fp.read(i32(fp.read(4))-4)
            # number of properties:
            num_props = i32(s, 4)
        except BaseException as exc:
            # catch exception while parsing property header, and only raise
            # a DEFECT_INCORRECT then return an empty dict, because this is not
            # a fatal error when parsing the whole file
            msg = 'Error while parsing properties header in stream %s: %s' % (
                repr(streampath), exc)
            self._raise_defect(DEFECT_INCORRECT, msg, type(exc))
            return data
        # clamp num_props based on the data length, to avoid iterating past
        # the end of the section on malformed files:
        num_props = min(num_props, int(len(s) / 8))
        for i in iterrange(num_props):
            property_id = 0  # just in case of an exception
            try:
                property_id = i32(s, 8+i*8)
                offset = i32(s, 12+i*8)
                property_type = i32(s, offset)
                log.debug('property id=%d: type=%d offset=%X' % (property_id, property_type, offset))
                # test for common types first (should perhaps use
                # a dictionary instead?)
                if property_type == VT_I2:  # 16-bit signed integer
                    value = i16(s, offset+4)
                    if value >= 32768:
                        # i16 is unsigned, convert to signed manually:
                        value = value - 65536
                elif property_type == VT_UI2:  # 2-byte unsigned integer
                    value = i16(s, offset+4)
                elif property_type in (VT_I4, VT_INT, VT_ERROR):
                    # VT_I4: 32-bit signed integer
                    # VT_ERROR: HRESULT, similar to 32-bit signed integer,
                    # see https://msdn.microsoft.com/en-us/library/cc230330.aspx
                    value = i32(s, offset+4)
                elif property_type in (VT_UI4, VT_UINT):  # 4-byte unsigned integer
                    value = i32(s, offset+4)  # FIXME
                elif property_type in (VT_BSTR, VT_LPSTR):
                    # CodePageString, see https://msdn.microsoft.com/en-us/library/dd942354.aspx
                    # size is a 32 bits integer, including the null terminator, and
                    # possibly trailing or embedded null chars
                    #TODO: if codepage is unicode, the string should be converted as such
                    count = i32(s, offset+4)
                    value = s[offset+8:offset+8+count-1]
                    # remove all null chars:
                    value = value.replace(b'\x00', b'')
                elif property_type == VT_BLOB:
                    # binary large object (BLOB)
                    # see https://msdn.microsoft.com/en-us/library/dd942282.aspx
                    count = i32(s, offset+4)
                    value = s[offset+8:offset+8+count]
                elif property_type == VT_LPWSTR:
                    # UnicodeString
                    # see https://msdn.microsoft.com/en-us/library/dd942313.aspx
                    # "the string should NOT contain embedded or additional trailing
                    # null characters."
                    count = i32(s, offset+4)
                    value = self._decode_utf16_str(s[offset+8:offset+8+count*2])
                elif property_type == VT_FILETIME:
                    value = long(i32(s, offset+4)) + (long(i32(s, offset+8))<<32)
                    # FILETIME is a 64-bit int: "number of 100ns periods
                    # since Jan 1,1601".
                    if convert_time and property_id not in no_conversion:
                        log.debug('Converting property #%d to python datetime, value=%d=%fs'
                                  %(property_id, value, float(value)/10000000))
                        # convert FILETIME to Python datetime.datetime
                        # inspired from https://code.activestate.com/recipes/511425-filetime-to-datetime/
                        _FILETIME_null_date = datetime.datetime(1601, 1, 1, 0, 0, 0)
                        log.debug('timedelta days=%d' % (value//(10*1000000*3600*24)))
                        value = _FILETIME_null_date + datetime.timedelta(microseconds=value//10)
                    else:
                        # legacy code kept for backward compatibility: returns a
                        # number of seconds since Jan 1,1601
                        value = value // 10000000  # seconds
                elif property_type == VT_UI1:  # 1-byte unsigned integer
                    value = i8(s[offset+4])
                elif property_type == VT_CLSID:
                    value = _clsid(s[offset+4:offset+20])
                elif property_type == VT_CF:
                    # PropertyIdentifier or ClipboardData??
                    # see https://msdn.microsoft.com/en-us/library/dd941945.aspx
                    count = i32(s, offset+4)
                    value = s[offset+8:offset+8+count]
                elif property_type == VT_BOOL:
                    # VARIANT_BOOL, 16 bits bool, 0x0000=False, 0xFFFF=True
                    # see https://msdn.microsoft.com/en-us/library/cc237864.aspx
                    value = bool(i16(s, offset+4))
                else:
                    value = None  # everything else yields "None"
                    log.debug('property id=%d: type=%d not implemented in parser yet' % (property_id, property_type))
                # missing: VT_EMPTY, VT_NULL, VT_R4, VT_R8, VT_CY, VT_DATE,
                # VT_DECIMAL, VT_I1, VT_I8, VT_UI8,
                # see https://msdn.microsoft.com/en-us/library/dd942033.aspx
                # FIXME: add support for VT_VECTOR
                # VT_VECTOR is a 32 uint giving the number of items, followed by
                # the items in sequence. The VT_VECTOR value is combined with the
                # type of items, e.g. VT_VECTOR|VT_BSTR
                # see https://msdn.microsoft.com/en-us/library/dd942011.aspx
                data[property_id] = value
            except BaseException as exc:
                # catch exception while parsing each property, and only raise
                # a DEFECT_INCORRECT, because parsing can go on
                msg = 'Error while parsing property id %d in stream %s: %s' % (
                    property_id, repr(streampath), exc)
                self._raise_defect(DEFECT_INCORRECT, msg, type(exc))
        return data
  1966. def get_metadata(self):
  1967. """
  1968. Parse standard properties streams, return an OleMetadata object
  1969. containing all the available metadata.
  1970. (also stored in the metadata attribute of the OleFileIO object)
  1971. new in version 0.25
  1972. """
  1973. self.metadata = OleMetadata()
  1974. self.metadata.parse_properties(self)
  1975. return self.metadata
  1976. #
  1977. # --------------------------------------------------------------------
  1978. # This script can be used to dump the directory of any OLE2 structured
  1979. # storage file.
  1980. def main():
  1981. """
  1982. Main function when olefile is runs as a script from the command line.
  1983. This will open an OLE2 file and display its structure and properties
  1984. :return: nothing
  1985. """
  1986. import sys, optparse
  1987. DEFAULT_LOG_LEVEL = "warning" # Default log level
  1988. LOG_LEVELS = {
  1989. 'debug': logging.DEBUG,
  1990. 'info': logging.INFO,
  1991. 'warning': logging.WARNING,
  1992. 'error': logging.ERROR,
  1993. 'critical': logging.CRITICAL
  1994. }
  1995. usage = 'usage: %prog [options] <filename> [filename2 ...]'
  1996. parser = optparse.OptionParser(usage=usage)
  1997. parser.add_option("-c", action="store_true", dest="check_streams",
  1998. help='check all streams (for debugging purposes)')
  1999. parser.add_option("-d", action="store_true", dest="debug_mode",
  2000. help='debug mode, shortcut for -l debug (displays a lot of debug information, for developers only)')
  2001. parser.add_option('-l', '--loglevel', dest="loglevel", action="store", default=DEFAULT_LOG_LEVEL,
  2002. help="logging level debug/info/warning/error/critical (default=%default)")
  2003. (options, args) = parser.parse_args()
  2004. print('olefile version %s %s - https://www.decalage.info/en/olefile\n' % (__version__, __date__))
  2005. # Print help if no arguments are passed
  2006. if len(args) == 0:
  2007. print(__doc__)
  2008. parser.print_help()
  2009. sys.exit()
  2010. if options.debug_mode:
  2011. options.loglevel = 'debug'
  2012. # setup logging to the console
  2013. logging.basicConfig(level=LOG_LEVELS[options.loglevel], format='%(levelname)-8s %(message)s')
  2014. # also enable the module's logger:
  2015. enable_logging()
  2016. for filename in args:
  2017. try:
  2018. ole = OleFileIO(filename)#, raise_defects=DEFECT_INCORRECT)
  2019. print("-" * 68)
  2020. print(filename)
  2021. print("-" * 68)
  2022. ole.dumpdirectory()
  2023. for streamname in ole.listdir():
  2024. if streamname[-1][0] == "\005":
  2025. print("%r: properties" % streamname)
  2026. try:
  2027. props = ole.getproperties(streamname, convert_time=True)
  2028. props = sorted(props.items())
  2029. for k, v in props:
  2030. #[PL]: avoid to display too large or binary values:
  2031. if isinstance(v, (basestring, bytes)):
  2032. if len(v) > 50:
  2033. v = v[:50]
  2034. if isinstance(v, bytes):
  2035. # quick and dirty binary check:
  2036. for c in (1,2,3,4,5,6,7,11,12,14,15,16,17,18,19,20,
  2037. 21,22,23,24,25,26,27,28,29,30,31):
  2038. if c in bytearray(v):
  2039. v = '(binary data)'
  2040. break
  2041. print(" ", k, v)
  2042. except:
  2043. log.exception('Error while parsing property stream %r' % streamname)
  2044. if options.check_streams:
  2045. # Read all streams to check if there are errors:
  2046. print('\nChecking streams...')
  2047. for streamname in ole.listdir():
  2048. # print name using repr() to convert binary chars to \xNN:
  2049. print('-', repr('/'.join(streamname)),'-', end=' ')
  2050. st_type = ole.get_type(streamname)
  2051. if st_type == STGTY_STREAM:
  2052. print('size %d' % ole.get_size(streamname))
  2053. # just try to read stream in memory:
  2054. ole.openstream(streamname)
  2055. else:
  2056. print('NOT a stream : type=%d' % st_type)
  2057. print()
  2058. ## for streamname in ole.listdir():
  2059. ## # print name using repr() to convert binary chars to \xNN:
  2060. ## print('-', repr('/'.join(streamname)),'-', end=' ')
  2061. ## print(ole.getmtime(streamname))
  2062. ## print()
  2063. print('Modification/Creation times of all directory entries:')
  2064. for entry in ole.direntries:
  2065. if entry is not None:
  2066. print('- %s: mtime=%s ctime=%s' % (entry.name,
  2067. entry.getmtime(), entry.getctime()))
  2068. print()
  2069. # parse and display metadata:
  2070. try:
  2071. meta = ole.get_metadata()
  2072. meta.dump()
  2073. except:
  2074. log.exception('Error while parsing metadata')
  2075. print()
  2076. #[PL] Test a few new methods:
  2077. root = ole.get_rootentry_name()
  2078. print('Root entry name: "%s"' % root)
  2079. if ole.exists('worddocument'):
  2080. print("This is a Word document.")
  2081. print("type of stream 'WordDocument':", ole.get_type('worddocument'))
  2082. print("size :", ole.get_size('worddocument'))
  2083. if ole.exists('macros/vba'):
  2084. print("This document may contain VBA macros.")
  2085. # print parsing issues:
  2086. print('\nNon-fatal issues raised during parsing:')
  2087. if ole.parsing_issues:
  2088. for exctype, msg in ole.parsing_issues:
  2089. print('- %s: %s' % (exctype.__name__, msg))
  2090. else:
  2091. print('None')
  2092. except:
  2093. log.exception('Error while parsing file %r' % filename)
  2094. if __name__ == "__main__":
  2095. main()
  2096. # this code was developed while listening to The Wedding Present "Sea Monsters"