2018-04-01 01:04:06 +02:00
|
|
|
import os
|
2018-03-31 20:56:15 +02:00
|
|
|
import re
|
2018-04-01 01:04:06 +02:00
|
|
|
import shutil
|
2018-03-31 15:47:06 +02:00
|
|
|
import tempfile
|
2018-04-01 15:08:38 +02:00
|
|
|
import datetime
|
2018-04-01 01:04:06 +02:00
|
|
|
import zipfile
|
2018-06-27 23:10:53 +02:00
|
|
|
import xml.etree.ElementTree as ET
|
2018-06-21 23:02:41 +02:00
|
|
|
from typing import Dict, Set, Pattern
|
2018-03-31 15:47:06 +02:00
|
|
|
|
2018-06-27 23:10:53 +02:00
|
|
|
|
2018-03-31 15:47:06 +02:00
|
|
|
from . import abstract, parser_factory
|
|
|
|
|
2018-06-21 23:07:21 +02:00
|
|
|
# Make pyflakes happy
|
|
|
|
assert Set
|
|
|
|
assert Pattern
|
2018-04-04 23:21:48 +02:00
|
|
|
|
2018-07-01 23:11:10 +02:00
|
|
|
def _parse_xml(full_path: str):
    """ Parse an XML file with namespace support.

    Returns a tuple (ElementTree, mapping of namespace prefix -> URI).
    """
    # etree's namespace support is a bit rough: harvest every
    # prefix/URI pair announced in the document ourselves.
    namespaces = {
        prefix: uri
        for event, (prefix, uri) in ET.iterparse(full_path, ("start-ns",))
        if event == "start-ns"
    }

    # Register the namespaces so serialisation keeps the prefixes.
    for prefix, uri in namespaces.items():
        ET.register_namespace(prefix, uri)

    return ET.parse(full_path), namespaces
|
|
|
|
|
|
|
|
|
2018-04-01 01:04:06 +02:00
|
|
|
class ArchiveBasedAbstractParser(abstract.AbstractParser):
    """ Shared base class for zip-based document formats
    (both MS Office and LibreOffice documents are zip containers).
    """
    # Those are the files that have a format that _isn't_
    # supported by MAT2, but that we want to keep anyway.
    files_to_keep = set()  # type: Set[str]

    # Those are the files that we _do not_ want to keep,
    # no matter if they are supported or not.
    files_to_omit = set()  # type: Set[Pattern]

    def __init__(self, filename):
        super().__init__(filename)
        try:  # better fail here than later
            # Use a context manager so the probe handle is closed
            # immediately instead of leaking until GC.
            with zipfile.ZipFile(self.filename):
                pass
        except zipfile.BadZipFile as e:
            # Callers expect ValueError; keep the original as the cause.
            raise ValueError from e

    def _specific_cleanup(self, full_path: str) -> bool:
        """ This method can be used to apply specific treatment
        to files present in the archive."""
        return True

    def _clean_zipinfo(self, zipinfo: zipfile.ZipInfo) -> zipfile.ZipInfo:
        """ Normalise a ZipInfo's own metadata in place and return it. """
        zipinfo.create_system = 3  # Linux
        zipinfo.comment = b''
        zipinfo.date_time = (1980, 1, 1, 0, 0, 0)  # epoch of the zip format
        return zipinfo

    def _get_zipinfo_meta(self, zipinfo: zipfile.ZipInfo) -> Dict[str, str]:
        """ Return the metadata carried by the ZipInfo entry itself
        (creating system, comment, timestamp)."""
        metadata = {}
        if zipinfo.create_system == 3:
            # Linux is the expected (clean) value: report nothing.
            pass
        elif zipinfo.create_system == 2:
            metadata['create_system'] = 'Windows'
        else:
            metadata['create_system'] = 'Weird'

        if zipinfo.comment:
            metadata['comment'] = zipinfo.comment  # type: ignore

        if zipinfo.date_time != (1980, 1, 1, 0, 0, 0):
            metadata['date_time'] = str(datetime.datetime(*zipinfo.date_time))

        return metadata

    def remove_all(self) -> bool:
        """ Rebuild the archive into self.output_filename with sanitised
        zip metadata; each member is kept verbatim (files_to_keep),
        dropped (files_to_omit), or cleaned by its own parser.
        Returns False (after removing the partial output) as soon as a
        member can't be handled.
        """
        with zipfile.ZipFile(self.filename) as zin,\
                zipfile.ZipFile(self.output_filename, 'w') as zout:

            temp_folder = tempfile.mkdtemp()

            def abort(message: str) -> bool:
                # Single place for the failure path: drop the scratch
                # space and the partial output, report, and bail out.
                shutil.rmtree(temp_folder)
                os.remove(self.output_filename)
                print(message)
                return False

            for item in zin.infolist():
                if item.filename[-1] == '/':  # `is_dir` is added in Python3.6
                    continue  # don't keep empty folders

                zin.extract(member=item, path=temp_folder)
                full_path = os.path.join(temp_folder, item.filename)

                if self._specific_cleanup(full_path) is False:
                    return abort("Something went wrong during deep cleaning of %s" % item.filename)

                if item.filename in self.files_to_keep:
                    # those files aren't supported, but we want to add them anyway
                    pass
                elif any(map(lambda r: r.search(item.filename), self.files_to_omit)):
                    continue
                else:
                    # supported files that we want to clean then add
                    tmp_parser, mtype = parser_factory.get_parser(full_path)  # type: ignore
                    if not tmp_parser:
                        return abort("%s's format (%s) isn't supported" % (item.filename, mtype))
                    tmp_parser.remove_all()
                    os.rename(tmp_parser.output_filename, full_path)

                # Re-add the (possibly cleaned) member with scrubbed
                # per-entry zip metadata.
                zinfo = zipfile.ZipInfo(item.filename)  # type: ignore
                clean_zinfo = self._clean_zipinfo(zinfo)
                with open(full_path, 'rb') as f:
                    zout.writestr(clean_zinfo, f.read())

        shutil.rmtree(temp_folder)
        return True
|
|
|
|
|
2018-04-04 23:21:48 +02:00
|
|
|
|
2018-04-01 01:04:06 +02:00
|
|
|
class MSOfficeParser(ArchiveBasedAbstractParser):
    """ Parser for OOXML (docx/xlsx/pptx) documents. """
    mimetypes = {
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        'application/vnd.openxmlformats-officedocument.presentationml.presentation'
    }
    files_to_keep = {
        '[Content_Types].xml',
        '_rels/.rels',
        'word/_rels/document.xml.rels',
        'word/document.xml',
        'word/fontTable.xml',
        'word/settings.xml',
        'word/styles.xml',
    }
    files_to_omit = set(map(re.compile, {  # type: ignore
        '^docProps/',
    }))

    def __remove_revisions(self, full_path: str) -> bool:
        """ Remove tracked changes (w:del / w:ins) from word/document.xml.

        In this function, we're changing the XML document in two times,
        since we don't want to change the tree we're iterating on.
        """
        tree, ns = _parse_xml(full_path)

        # Only bail out early when *neither* kind of revision is present.
        # (A plain `elif` chain here would return as soon as one of the
        # two was missing, leaving the other kind in the document.)
        if tree.find('.//w:del', ns) is None and tree.find('.//w:ins', ns) is None:
            return True

        # etree elements have no parent pointer: build a child->parent map.
        parent_map = {c: p for p in tree.iter() for c in p}

        # Deleted runs (w:del) are dropped entirely.
        elements = list(tree.iterfind('.//w:del', ns))
        for element in elements:
            parent_map[element].remove(element)

        # Inserted runs (w:ins) keep their content: hoist each child in
        # place of its wrapping w:ins element.
        elements = list()
        for element in tree.iterfind('.//w:ins', ns):
            for position, item in enumerate(tree.iter()):
                if item == element:
                    for children in element.iterfind('./*'):
                        elements.append((element, position, children))
                    break

        for (element, position, children) in elements:
            parent_map[element].insert(position, children)
            parent_map[element].remove(element)

        tree.write(full_path, xml_declaration=True)

        return True

    def _specific_cleanup(self, full_path: str) -> bool:
        # Tracked changes live in the main document part.
        if full_path.endswith('/word/document.xml'):
            return self.__remove_revisions(full_path)
        return True

    def get_meta(self) -> Dict[str, str]:
        """
        Yes, I know that parsing xml with regexp ain't pretty,
        be my guest and fix it if you want.
        """
        metadata = {}
        with zipfile.ZipFile(self.filename) as zipin:
            for item in zipin.infolist():
                if item.filename.startswith('docProps/') and item.filename.endswith('.xml'):
                    content = zipin.read(item).decode('utf-8')
                    try:
                        results = re.findall(r"<(.+)>(.+)</\1>", content, re.I|re.M)
                        for (key, value) in results:
                            metadata[key] = value
                    except TypeError:  # We didn't manage to parse the xml file
                        pass
                if not metadata:  # better safe than sorry
                    # Key on the member's *name*: the ZipInfo object itself
                    # isn't a str and this dict is Dict[str, str].
                    metadata[item.filename] = 'harmful content'
                for key, value in self._get_zipinfo_meta(item).items():
                    metadata[key] = value
        return metadata
|
|
|
|
|
2018-04-01 00:17:06 +02:00
|
|
|
|
2018-04-01 01:04:06 +02:00
|
|
|
class LibreOfficeParser(ArchiveBasedAbstractParser):
    """ Parser for OpenDocument (odt/ods/odp/...) documents. """
    mimetypes = {
        'application/vnd.oasis.opendocument.text',
        'application/vnd.oasis.opendocument.spreadsheet',
        'application/vnd.oasis.opendocument.presentation',
        'application/vnd.oasis.opendocument.graphics',
        'application/vnd.oasis.opendocument.chart',
        'application/vnd.oasis.opendocument.formula',
        'application/vnd.oasis.opendocument.image',
    }
    files_to_keep = {
        'META-INF/manifest.xml',
        'content.xml',
        'manifest.rdf',
        'mimetype',
        'settings.xml',
        'styles.xml',
    }
    files_to_omit = set(map(re.compile, {  # type: ignore
        r'^meta\.xml$',
        '^Configurations2/',
        '^Thumbnails/',
    }))

    def __remove_revisions(self, full_path: str) -> bool:
        """ Strip the text:tracked-changes containers from content.xml. """
        tree, ns = _parse_xml(full_path)

        if 'office' not in ns.keys():  # no revisions in the current file
            return True

        for text in tree.getroot().iterfind('.//office:text', ns):
            for changes in text.iterfind('.//text:tracked-changes', ns):
                text.remove(changes)

        tree.write(full_path, xml_declaration=True)

        return True

    def _specific_cleanup(self, full_path: str) -> bool:
        # Tracked changes are stored in the content.xml member.
        if os.path.basename(full_path) == 'content.xml':
            return self.__remove_revisions(full_path)
        return True

    def get_meta(self) -> Dict[str, str]:
        """
        Yes, I know that parsing xml with regexp ain't pretty,
        be my guest and fix it if you want.
        """
        metadata = {}
        with zipfile.ZipFile(self.filename) as zipin:
            for item in zipin.infolist():
                if item.filename == 'meta.xml':
                    content = zipin.read(item).decode('utf-8')
                    try:
                        results = re.findall(r"<((?:meta|dc|cp).+?)>(.+)</\1>", content, re.I|re.M)
                        for (key, value) in results:
                            metadata[key] = value
                    except TypeError:  # We didn't manage to parse the xml file
                        pass
                if not metadata:  # better safe than sorry
                    # Key on the member's *name*: the ZipInfo object itself
                    # isn't a str and this dict is Dict[str, str].
                    metadata[item.filename] = 'harmful content'
                for key, value in self._get_zipinfo_meta(item).items():
                    metadata[key] = value
        return metadata
|