2018-09-09 18:57:08 +02:00
|
|
|
import logging
|
2018-04-01 01:04:06 +02:00
|
|
|
import os
|
2018-03-31 20:56:15 +02:00
|
|
|
import re
|
2018-04-01 01:04:06 +02:00
|
|
|
import zipfile
|
2018-06-21 23:02:41 +02:00
|
|
|
from typing import Dict, Set, Pattern
|
2018-07-10 21:30:38 +02:00
|
|
|
|
2018-09-05 18:41:08 +02:00
|
|
|
import xml.etree.ElementTree as ET # type: ignore
|
2018-06-27 23:10:53 +02:00
|
|
|
|
2018-09-06 11:32:45 +02:00
|
|
|
from .archive import ArchiveBasedAbstractParser
|
2018-03-31 15:47:06 +02:00
|
|
|
|
2018-09-20 22:37:53 +02:00
|
|
|
# pylint: disable=line-too-long
|
|
|
|
|
2018-06-21 23:07:21 +02:00
|
|
|
# Make pyflakes happy
|
|
|
|
assert Set
|
|
|
|
assert Pattern
|
2018-04-04 23:21:48 +02:00
|
|
|
|
2018-07-01 23:11:10 +02:00
|
|
|
def _parse_xml(full_path: str):
|
2018-09-09 18:57:08 +02:00
|
|
|
""" This function parses XML, with namespace support. """
|
2018-07-01 23:11:10 +02:00
|
|
|
|
2018-07-19 22:52:40 +02:00
|
|
|
namespace_map = dict()
|
|
|
|
for _, (key, value) in ET.iterparse(full_path, ("start-ns", )):
|
2018-09-09 18:57:08 +02:00
|
|
|
# The ns[0-9]+ namespaces are reserved for interal usage, so
|
|
|
|
# we have to use an other nomenclature.
|
2018-09-24 19:50:24 +02:00
|
|
|
if re.match('^ns[0-9]+$', key, re.I): # pragma: no cover
|
2018-09-20 22:37:53 +02:00
|
|
|
key = 'mat' + key[2:]
|
2018-09-09 18:57:08 +02:00
|
|
|
|
2018-07-19 22:52:40 +02:00
|
|
|
namespace_map[key] = value
|
2018-07-09 01:11:44 +02:00
|
|
|
ET.register_namespace(key, value)
|
2018-07-01 23:11:10 +02:00
|
|
|
|
2018-07-09 01:11:44 +02:00
|
|
|
return ET.parse(full_path), namespace_map
|
2018-07-01 23:11:10 +02:00
|
|
|
|
|
|
|
|
2018-09-09 18:57:08 +02:00
|
|
|
def _sort_xml_attributes(full_path: str) -> bool:
|
|
|
|
""" Sort xml attributes lexicographically,
|
|
|
|
because it's possible to fingerprint producers (MS Office, Libreoffice, …)
|
|
|
|
since they are all using different orders.
|
|
|
|
"""
|
|
|
|
tree = ET.parse(full_path)
|
|
|
|
root = tree.getroot()
|
|
|
|
|
|
|
|
for c in root:
|
|
|
|
c[:] = sorted(c, key=lambda child: (child.tag, child.get('desc')))
|
|
|
|
|
|
|
|
tree.write(full_path, xml_declaration=True)
|
|
|
|
return True
|
|
|
|
|
|
|
|
|
2018-04-01 01:04:06 +02:00
|
|
|
class MSOfficeParser(ArchiveBasedAbstractParser):
    """ Parser for OOXML (MS Office) documents: docx, xlsx, pptx.

    The keep/omit sets below are consumed by ArchiveBasedAbstractParser —
    presumably as whitelist/blacklist of archive members; confirm against
    the base class.
    """
    mimetypes = {
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        'application/vnd.openxmlformats-officedocument.presentationml.presentation'
    }
    # Archive members that are safe to keep as-is.
    files_to_keep = {
        '[Content_Types].xml',
        '_rels/.rels',
        'word/_rels/document.xml.rels',
        'word/document.xml',
        'word/fontTable.xml',
        'word/settings.xml',
        'word/styles.xml',

        # https://msdn.microsoft.com/en-us/library/dd908153(v=office.12).aspx
        'word/stylesWithEffects.xml',
    }
    # Regexes of archive members to drop (docProps/ holds the metadata).
    files_to_omit = set(map(re.compile, { # type: ignore
        'word/webSettings.xml',
        'word/theme',
        '^docProps/',
    }))

    @staticmethod
    def __remove_rsid(full_path: str) -> bool:
        """ Remove the "revision session IDs" from the XML file at
        `full_path`, rewriting it in place.

        We're substring-matching on '}rsid' instead of proper parsing,
        since rsid can have multiple forms, like
        `rsidRDefault`, `rsidR`, `rsids`, …

        We're removing rsid tags in two times, because we can't modify
        the xml while we're iterating on it.

        For more details, see
        - https://msdn.microsoft.com/en-us/library/office/documentformat.openxml.wordprocessing.previoussectionproperties.rsidrpr.aspx
        - https://blogs.msdn.microsoft.com/brian_jones/2006/12/11/whats-up-with-all-those-rsids/

        :return: False when the file is not parseable XML, True otherwise.
        """
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError:
            return False

        # rsid, tags or attributes, are always under the `w` namespace
        if 'w' not in namespace.keys():
            return True

        # ElementTree has no parent pointers, so build a child -> parent
        # map up-front to be able to detach elements later.
        parent_map = {c:p for p in tree.iter() for c in p}

        elements_to_remove = list()
        for item in tree.iterfind('.//', namespace):
            if '}rsid' in item.tag.strip().lower(): # rsid as tag
                elements_to_remove.append(item)
                continue
            for key in list(item.attrib.keys()): # rsid as attribute
                if '}rsid' in key.lower():
                    del item.attrib[key]

        for element in elements_to_remove:
            parent_map[element].remove(element)

        tree.write(full_path, xml_declaration=True)

        return True

    @staticmethod
    def __remove_revisions(full_path: str) -> bool:
        """ Remove tracked changes (revisions) from the XML file at
        `full_path`, rewriting it in place: deletions are dropped,
        insertions are accepted (their children are hoisted in place of
        the `w:ins` wrapper).

        In this function, we're changing the XML document in several
        different times, since we don't want to change the tree we're currently
        iterating on.

        :return: False when the file is not parseable XML, True otherwise.
        """
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        # Revisions are either deletions (`w:del`) or
        # insertions (`w:ins`)
        del_presence = tree.find('.//w:del', namespace)
        ins_presence = tree.find('.//w:ins', namespace)
        if del_presence is None and ins_presence is None:
            return True # No revisions are present

        # child -> parent map, since ElementTree has no parent pointers.
        parent_map = {c:p for p in tree.iter() for c in p}

        # First pass: drop every deletion wholesale.
        elements = list()
        for element in tree.iterfind('.//w:del', namespace):
            elements.append(element)
        for element in elements:
            parent_map[element].remove(element)

        # Second pass: record each `w:ins` wrapper's position so its
        # children can be re-inserted at the same index before the
        # wrapper itself is removed.
        elements = list()
        for element in tree.iterfind('.//w:ins', namespace):
            for position, item in enumerate(tree.iter()): # pragma: no cover
                if item == element:
                    for children in element.iterfind('./*'):
                        elements.append((element, position, children))
                    break
        for (element, position, children) in elements:
            parent_map[element].insert(position, children)
            parent_map[element].remove(element)

        tree.write(full_path, xml_declaration=True)

        return True

    def _specific_cleanup(self, full_path: str) -> bool:
        """ Per-file cleanup hook: strip revisions from the main document
        part, and rsid markers from every XML part.

        :return: False when a cleanup step failed, True otherwise.
        """
        if os.stat(full_path).st_size == 0: # Don't process empty files
            return True

        if full_path.endswith('/word/document.xml'):
            # this file contains the revisions
            if self.__remove_revisions(full_path) is False:
                return False

        if full_path.endswith('.xml'):
            if self.__remove_rsid(full_path) is False:
                return False

        return True

    def get_meta(self) -> Dict[str, str]:
        """ Collect metadata from every `docProps/*.xml` member of the
        archive, plus per-member zipinfo metadata.

        Yes, I know that parsing xml with regexp ain't pretty,
        be my guest and fix it if you want.
        """
        metadata = {}
        zipin = zipfile.ZipFile(self.filename)
        for item in zipin.infolist():
            if item.filename.startswith('docProps/') and item.filename.endswith('.xml'):
                try:
                    content = zipin.read(item).decode('utf-8')
                    # Every <tag>value</tag> pair becomes a metadata entry.
                    results = re.findall(r"<(.+)>(.+)</\1>", content, re.I|re.M)
                    for (key, value) in results:
                        metadata[key] = value
                except (TypeError, UnicodeDecodeError): # We didn't manage to parse the xml file
                    metadata[item.filename] = 'harmful content'
            for key, value in self._get_zipinfo_meta(item).items():
                metadata[key] = value
        zipin.close()
        return metadata
|
|
|
|
|
2018-04-01 00:17:06 +02:00
|
|
|
|
2018-04-01 01:04:06 +02:00
|
|
|
class LibreOfficeParser(ArchiveBasedAbstractParser):
    """ Parser for OpenDocument (LibreOffice) files: odt, ods, odp, …

    The keep/omit sets below are consumed by ArchiveBasedAbstractParser —
    presumably as whitelist/blacklist of archive members; confirm against
    the base class.
    """
    mimetypes = {
        'application/vnd.oasis.opendocument.text',
        'application/vnd.oasis.opendocument.spreadsheet',
        'application/vnd.oasis.opendocument.presentation',
        'application/vnd.oasis.opendocument.graphics',
        'application/vnd.oasis.opendocument.chart',
        'application/vnd.oasis.opendocument.formula',
        'application/vnd.oasis.opendocument.image',
    }
    # Archive members that are safe to keep as-is.
    files_to_keep = {
        'META-INF/manifest.xml',
        'content.xml',
        'manifest.rdf',
        'mimetype',
        'settings.xml',
        'styles.xml',
    }
    # Regexes of archive members to drop (meta.xml holds the metadata,
    # Thumbnails/ a rendered preview).
    files_to_omit = set(map(re.compile, { # type: ignore
        r'^meta\.xml$',
        '^Configurations2/',
        '^Thumbnails/',
    }))

    @staticmethod
    def __remove_revisions(full_path: str) -> bool:
        """ Remove the `text:tracked-changes` blocks (tracked revisions)
        from the XML file at `full_path`, rewriting it in place.

        :return: False when the file is not parseable XML, True otherwise.
        """
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        if 'office' not in namespace.keys(): # no revisions in the current file
            return True

        for text in tree.getroot().iterfind('.//office:text', namespace):
            for changes in text.iterfind('.//text:tracked-changes', namespace):
                text.remove(changes)

        tree.write(full_path, xml_declaration=True)

        return True

    def _specific_cleanup(self, full_path: str) -> bool:
        """ Per-file cleanup hook: strip tracked changes from content.xml
        and normalise element order in every XML part to defeat
        producer fingerprinting.

        :return: False when a cleanup step failed, True otherwise.
        """
        if os.stat(full_path).st_size == 0: # Don't process empty files
            return True

        if os.path.basename(full_path).endswith('.xml'):
            if os.path.basename(full_path) == 'content.xml':
                if self.__remove_revisions(full_path) is False:
                    return False

            try:
                _sort_xml_attributes(full_path)
            except ET.ParseError as e:
                logging.error("Unable to parse %s: %s", full_path, e)
                return False
        return True

    def get_meta(self) -> Dict[str, str]:
        """ Collect metadata from the archive's `meta.xml` member, plus
        per-member zipinfo metadata.

        Yes, I know that parsing xml with regexp ain't pretty,
        be my guest and fix it if you want.
        """
        metadata = {}
        zipin = zipfile.ZipFile(self.filename)
        for item in zipin.infolist():
            if item.filename == 'meta.xml':
                try:
                    content = zipin.read(item).decode('utf-8')
                    # Only the meta:/dc:/cp: namespaced tags carry metadata.
                    results = re.findall(r"<((?:meta|dc|cp).+?)>(.+)</\1>", content, re.I|re.M)
                    for (key, value) in results:
                        metadata[key] = value
                except (TypeError, UnicodeDecodeError): # We didn't manage to parse the xml file
                    metadata[item.filename] = 'harmful content'
            for key, value in self._get_zipinfo_meta(item).items():
                metadata[key] = value
        zipin.close()
        return metadata
|