import os
import re
import shutil
import tempfile
import datetime
import zipfile
import logging
from typing import Dict, Set, Pattern

import xml.etree.ElementTree as ET  # type: ignore

from . import abstract, parser_factory, UnknownMemberPolicy

# Make pyflakes happy
assert Set
assert Pattern


def _parse_xml(full_path: str):
    """ This function parses XML, with namespace support. """
    namespace_map = dict()
    for _, (key, value) in ET.iterparse(full_path, ("start-ns", )):
        namespace_map[key] = value
        ET.register_namespace(key, value)

    return ET.parse(full_path), namespace_map
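
# For instance, for a DOCX's word/document.xml, the returned namespace map
# is expected to look roughly like this (hedged sketch, values abridged):
#   tree, ns = _parse_xml(full_path)
#   ns['w']  # 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
# which makes namespaced lookups like tree.find('.//w:del', ns) possible.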


class ArchiveBasedAbstractParser(abstract.AbstractParser):
    """ Office files (.docx, .odt, …) are zipped files. """
    # Those are the files whose format _isn't_ supported by MAT2,
    # but that we want to keep anyway.
    files_to_keep = set()  # type: Set[str]

    # Those are the files that we do _not_ want to keep,
    # whether they are supported or not.
    files_to_omit = set()  # type: Set[Pattern]

    # What should the parser do when it encounters an unknown
    # file in the archive?
    unknown_member_policy = UnknownMemberPolicy.ABORT  # type: UnknownMemberPolicy
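    # Callers can loosen this policy, e.g.:
    #   parser.unknown_member_policy = UnknownMemberPolicy.KEEP
    # (the KEEP and OMIT variants are handled in remove_all() below)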

    def __init__(self, filename):
        super().__init__(filename)
        try:  # better fail here than later
            zipfile.ZipFile(self.filename)
        except zipfile.BadZipFile:
            raise ValueError

    def _specific_cleanup(self, full_path: str) -> bool:
        """ This method can be used to apply specific treatment
        to files present in the archive."""
        # pylint: disable=unused-argument,no-self-use
        return True  # pragma: no cover

    @staticmethod
    def _clean_zipinfo(zipinfo: zipfile.ZipInfo) -> zipfile.ZipInfo:
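        # Normalise the member's own zip metadata: claim a Linux origin,
        # drop any comment, and reset the timestamp.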
        zipinfo.create_system = 3  # Linux
        zipinfo.comment = b''
        zipinfo.date_time = (1980, 1, 1, 0, 0, 0)  # this is as early as a zipfile can be

        return zipinfo

    @staticmethod
    def _get_zipinfo_meta(zipinfo: zipfile.ZipInfo) -> Dict[str, str]:
        metadata = {}
        if zipinfo.create_system == 3:  # this is Linux
            pass
        elif zipinfo.create_system == 2:
            metadata['create_system'] = 'Windows'
        else:
            metadata['create_system'] = 'Weird'

        if zipinfo.comment:
            metadata['comment'] = zipinfo.comment  # type: ignore

        if zipinfo.date_time != (1980, 1, 1, 0, 0, 0):
            metadata['date_time'] = str(datetime.datetime(*zipinfo.date_time))

        return metadata

    def remove_all(self) -> bool:
        # pylint: disable=too-many-branches
        with zipfile.ZipFile(self.filename) as zin,\
                zipfile.ZipFile(self.output_filename, 'w') as zout:
            temp_folder = tempfile.mkdtemp()
            abort = False

            for item in zin.infolist():
                if item.filename[-1] == '/':  # `is_dir` was only added in Python 3.6
                    continue  # don't keep empty folders

                zin.extract(member=item, path=temp_folder)
                full_path = os.path.join(temp_folder, item.filename)

                if self._specific_cleanup(full_path) is False:
                    logging.warning("Something went wrong during deep cleaning of %s",
                                    item.filename)
                    abort = True
                    continue

                if item.filename in self.files_to_keep:
                    # those files aren't supported, but we want to add them anyway
                    pass
                elif any(map(lambda r: r.search(item.filename), self.files_to_omit)):
                    continue
                else:
                    # supported files that we want to clean, then add
                    tmp_parser, mtype = parser_factory.get_parser(full_path)  # type: ignore
                    if not tmp_parser:
                        if self.unknown_member_policy == UnknownMemberPolicy.OMIT:
                            logging.warning("In file %s, omitting unknown element %s (format: %s)",
                                            self.filename, item.filename, mtype)
                            continue
                        elif self.unknown_member_policy == UnknownMemberPolicy.KEEP:
                            logging.warning("In file %s, keeping unknown element %s (format: %s)",
                                            self.filename, item.filename, mtype)
                        else:
                            logging.error("In file %s, element %s's format (%s) "
                                          "isn't supported",
                                          self.filename, item.filename, mtype)
                            abort = True
                            continue
                    if tmp_parser:
                        tmp_parser.remove_all()
                        os.rename(tmp_parser.output_filename, full_path)

                zinfo = zipfile.ZipInfo(item.filename)  # type: ignore
                clean_zinfo = self._clean_zipinfo(zinfo)
                with open(full_path, 'rb') as f:
                    zout.writestr(clean_zinfo, f.read())

        shutil.rmtree(temp_folder)
        if abort:
            os.remove(self.output_filename)
            return False
        return True
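
    # Typical flow (hedged sketch; the path is hypothetical): callers
    # usually obtain a parser through parser_factory rather than
    # instantiating this class directly:
    #   parser, mtype = parser_factory.get_parser('./document.docx')
    #   parser.remove_all()  # writes the cleaned file to parser.output_filename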


class MSOfficeParser(ArchiveBasedAbstractParser):
    mimetypes = {
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        'application/vnd.openxmlformats-officedocument.presentationml.presentation'
    }
    files_to_keep = {
        '[Content_Types].xml',
        '_rels/.rels',
        'word/_rels/document.xml.rels',
        'word/document.xml',
        'word/fontTable.xml',
        'word/settings.xml',
        'word/styles.xml',
    }
    files_to_omit = set(map(re.compile, {  # type: ignore
        '^docProps/',
    }))

    @staticmethod
    def __remove_revisions(full_path: str) -> bool:
        """ In this function, we're modifying the XML document in several
        passes, because we don't want to mutate the tree we're currently
        iterating over.
        """
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError:
            return False

        # Revisions are either deletions (`w:del`) or insertions (`w:ins`)
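        # In word/document.xml, tracked changes look roughly like this
        # (hedged, simplified example):
        #   <w:ins w:id="1" w:author="jane"><w:r><w:t>added text</w:t></w:r></w:ins>
        #   <w:del w:id="2" w:author="jane"><w:r><w:delText>removed text</w:delText></w:r></w:del>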
        del_presence = tree.find('.//w:del', namespace)
        ins_presence = tree.find('.//w:ins', namespace)
        if del_presence is None and ins_presence is None:
            return True  # No revisions are present
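
        # xml.etree.ElementTree elements hold no reference to their parent,
        # so build a child -> parent map to be able to detach nodes later on.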
        parent_map = {c: p for p in tree.iter() for c in p}

        elements = list()
        for element in tree.iterfind('.//w:del', namespace):
            elements.append(element)
        for element in elements:
            parent_map[element].remove(element)

        elements = list()
        for element in tree.iterfind('.//w:ins', namespace):
            for position, item in enumerate(tree.iter()):  # pragma: no cover
                if item == element:
                    for child in element.iterfind('./*'):
                        elements.append((element, position, child))
                    break
        for (element, position, child) in elements:
            parent_map[element].insert(position, child)
            parent_map[element].remove(element)

        tree.write(full_path, xml_declaration=True)

        return True

    def _specific_cleanup(self, full_path: str) -> bool:
        if full_path.endswith('/word/document.xml'):
            # this file contains the revisions
            return self.__remove_revisions(full_path)
        return True

    def get_meta(self) -> Dict[str, str]:
        """
        Yes, I know that parsing xml with regexp ain't pretty,
        be my guest and fix it if you want.
        """
        metadata = {}
        zipin = zipfile.ZipFile(self.filename)
        for item in zipin.infolist():
            if item.filename.startswith('docProps/') and item.filename.endswith('.xml'):
                try:
                    content = zipin.read(item).decode('utf-8')
                    results = re.findall(r"<(.+)>(.+)</\1>", content, re.I|re.M)
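                    # docProps/*.xml typically contains entries such as
                    # (illustrative values):
                    #   <dc:creator>jane</dc:creator>
                    #   <cp:lastModifiedBy>jane</cp:lastModifiedBy>
                    # each of which becomes a key/value pair below.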
                    for (key, value) in results:
                        metadata[key] = value
                except (TypeError, UnicodeDecodeError):  # We didn't manage to parse the xml file
                    metadata[item.filename] = 'harmful content'
            for key, value in self._get_zipinfo_meta(item).items():
                metadata[key] = value
        zipin.close()
        return metadata


class LibreOfficeParser(ArchiveBasedAbstractParser):
    mimetypes = {
        'application/vnd.oasis.opendocument.text',
        'application/vnd.oasis.opendocument.spreadsheet',
        'application/vnd.oasis.opendocument.presentation',
        'application/vnd.oasis.opendocument.graphics',
        'application/vnd.oasis.opendocument.chart',
        'application/vnd.oasis.opendocument.formula',
        'application/vnd.oasis.opendocument.image',
    }
    files_to_keep = {
        'META-INF/manifest.xml',
        'content.xml',
        'manifest.rdf',
        'mimetype',
        'settings.xml',
        'styles.xml',
    }
    files_to_omit = set(map(re.compile, {  # type: ignore
        r'^meta\.xml$',
        '^Configurations2/',
        '^Thumbnails/',
    }))

    @staticmethod
    def __remove_revisions(full_path: str) -> bool:
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError:
            return False

        if 'office' not in namespace.keys():  # no revisions in the current file
            return True
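
        # In ODF, tracked changes sit under the office:text element,
        # roughly like this (hedged, simplified example):
        #   <office:text>
        #     <text:tracked-changes>…</text:tracked-changes>
        #     <text:p>…</text:p>
        #   </office:text>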
        for text in tree.getroot().iterfind('.//office:text', namespace):
            for changes in text.iterfind('.//text:tracked-changes', namespace):
                text.remove(changes)

        tree.write(full_path, xml_declaration=True)

        return True

    def _specific_cleanup(self, full_path: str) -> bool:
        if os.path.basename(full_path) == 'content.xml':
            return self.__remove_revisions(full_path)
        return True

    def get_meta(self) -> Dict[str, str]:
        """
        Yes, I know that parsing xml with regexp ain't pretty,
        be my guest and fix it if you want.
        """
        metadata = {}
        zipin = zipfile.ZipFile(self.filename)
        for item in zipin.infolist():
            if item.filename == 'meta.xml':
                try:
                    content = zipin.read(item).decode('utf-8')
                    results = re.findall(r"<((?:meta|dc|cp).+?)>(.+)</\1>", content, re.I|re.M)
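                    # meta.xml typically contains entries such as
                    # (illustrative values):
                    #   <meta:initial-creator>jane</meta:initial-creator>
                    #   <dc:date>2018-06-10T17:18:53</dc:date>
                    # matched by the regex above and collected below.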
                    for (key, value) in results:
                        metadata[key] = value
                except (TypeError, UnicodeDecodeError):  # We didn't manage to parse the xml file
                    metadata[item.filename] = 'harmful content'
            for key, value in self._get_zipinfo_meta(item).items():
                metadata[key] = value
        zipin.close()
        return metadata