# mat2/libmat2/office.py
import os
import re
import shutil
import tempfile
import datetime
import zipfile
import logging
from typing import Dict, Set, Pattern

try:  # protect against DoS
    from defusedxml import ElementTree as ET  # type: ignore
except ImportError:
    import xml.etree.ElementTree as ET  # type: ignore

from . import abstract, parser_factory

# Make pyflakes happy
assert Set
assert Pattern

logging.basicConfig(level=logging.ERROR)


def _parse_xml(full_path: str):
    """ This function parses XML, with namespace support. """
    namespace_map = dict()
    for _, (key, value) in ET.iterparse(full_path, ("start-ns", )):
        namespace_map[key] = value
        ET.register_namespace(key, value)

    return ET.parse(full_path), namespace_map
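
# A quick note on the helper above: ElementTree needs the prefix -> URI map to
# resolve prefixed paths, which is how the parsers below look up revision
# markers. A minimal sketch, assuming a hypothetical extracted word/document.xml:
#
#     tree, ns = _parse_xml('word/document.xml')
#     deletions = tree.findall('.//w:del', ns)  # 'w' is resolved through the map
#
# Registering each namespace also preserves the original prefixes when the tree
# is written back with tree.write().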


class ArchiveBasedAbstractParser(abstract.AbstractParser):
    # Those are the files that have a format that _isn't_
    # supported by MAT2, but that we want to keep anyway.
    files_to_keep = set()  # type: Set[str]

    # Those are the files that we _do not_ want to keep,
    # no matter if they are supported or not.
    files_to_omit = set()  # type: Set[Pattern]

    def __init__(self, filename):
        super().__init__(filename)
        try:  # better fail here than later
            zipfile.ZipFile(self.filename)
        except zipfile.BadZipFile:
            raise ValueError

    def _specific_cleanup(self, full_path: str) -> bool:
        """ This method can be used to apply specific treatment
        to files present in the archive."""
        # pylint: disable=unused-argument,no-self-use
        return True  # pragma: no cover

    @staticmethod
    def _clean_zipinfo(zipinfo: zipfile.ZipInfo) -> zipfile.ZipInfo:
        zipinfo.create_system = 3  # Linux
        zipinfo.comment = b''
        zipinfo.date_time = (1980, 1, 1, 0, 0, 0)  # 1980 is the earliest date a zip can hold
        return zipinfo

    @staticmethod
    def _get_zipinfo_meta(zipinfo: zipfile.ZipInfo) -> Dict[str, str]:
        metadata = {}
        if zipinfo.create_system == 3:
            #metadata['create_system'] = 'Linux'
            pass
        elif zipinfo.create_system == 2:
            metadata['create_system'] = 'Windows'
        else:
            metadata['create_system'] = 'Weird'

        if zipinfo.comment:
            metadata['comment'] = zipinfo.comment  # type: ignore

        if zipinfo.date_time != (1980, 1, 1, 0, 0, 0):
            metadata['date_time'] = str(datetime.datetime(*zipinfo.date_time))

        return metadata

    def remove_all(self) -> bool:
        with zipfile.ZipFile(self.filename) as zin,\
             zipfile.ZipFile(self.output_filename, 'w') as zout:

            temp_folder = tempfile.mkdtemp()

            for item in zin.infolist():
                if item.filename[-1] == '/':  # `is_dir` is added in Python3.6
                    continue  # don't keep empty folders

                zin.extract(member=item, path=temp_folder)
                full_path = os.path.join(temp_folder, item.filename)

                if self._specific_cleanup(full_path) is False:
                    shutil.rmtree(temp_folder)
                    os.remove(self.output_filename)
                    logging.info("Something went wrong during deep cleaning of %s", item.filename)
                    return False

                if item.filename in self.files_to_keep:
                    # those files aren't supported, but we want to add them anyway
                    pass
                elif any(map(lambda r: r.search(item.filename), self.files_to_omit)):
                    continue
                else:
                    # supported files that we want to clean then add
                    tmp_parser, mtype = parser_factory.get_parser(full_path)  # type: ignore
                    if not tmp_parser:
                        shutil.rmtree(temp_folder)
                        os.remove(self.output_filename)
                        logging.info("%s's format (%s) isn't supported", item.filename, mtype)
                        return False
                    tmp_parser.remove_all()
                    os.rename(tmp_parser.output_filename, full_path)

                zinfo = zipfile.ZipInfo(item.filename)  # type: ignore
                clean_zinfo = self._clean_zipinfo(zinfo)
                with open(full_path, 'rb') as f:
                    zout.writestr(clean_zinfo, f.read())

        shutil.rmtree(temp_folder)
        return True


class MSOfficeParser(ArchiveBasedAbstractParser):
    mimetypes = {
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        'application/vnd.openxmlformats-officedocument.presentationml.presentation'
    }
    files_to_keep = {
        '[Content_Types].xml',
        '_rels/.rels',
        'word/_rels/document.xml.rels',
        'word/document.xml',
        'word/fontTable.xml',
        'word/settings.xml',
        'word/styles.xml',
    }
    files_to_omit = set(map(re.compile, {  # type: ignore
        '^docProps/',
    }))

    @staticmethod
    def __remove_revisions(full_path: str) -> bool:
        """ In this method, we're modifying the XML document in two
        passes, since we don't want to change the very tree we're
        iterating on."""
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError:
            return False

        # w:del and w:ins are the OOXML markers for tracked deletions
        # and insertions ("track changes").
        del_presence = tree.find('.//w:del', namespace)
        ins_presence = tree.find('.//w:ins', namespace)
        if del_presence is None and ins_presence is None:
            return True  # no revisions are present

        parent_map = {c: p for p in tree.iter() for c in p}

        elements = list(tree.iterfind('.//w:del', namespace))
        for element in elements:
            parent_map[element].remove(element)

        elements = list()
        for element in tree.iterfind('.//w:ins', namespace):
            for position, item in enumerate(tree.iter()):  # pragma: no cover
                if item == element:
                    for children in element.iterfind('./*'):
                        elements.append((element, position, children))
                    break
        for (element, position, children) in elements:
            parent_map[element].insert(position, children)
            parent_map[element].remove(element)

        tree.write(full_path, xml_declaration=True)

        return True

    def _specific_cleanup(self, full_path: str) -> bool:
        if full_path.endswith('/word/document.xml'):
            return self.__remove_revisions(full_path)
        return True

    def get_meta(self) -> Dict[str, str]:
        """
        Yes, I know that parsing xml with regexp ain't pretty,
        be my guest and fix it if you want.
        """
        metadata = {}
        zipin = zipfile.ZipFile(self.filename)
        for item in zipin.infolist():
            if item.filename.startswith('docProps/') and item.filename.endswith('.xml'):
                try:
                    content = zipin.read(item).decode('utf-8')
                    results = re.findall(r"<(.+)>(.+)</\1>", content, re.I | re.M)
                    for (key, value) in results:
                        metadata[key] = value
                except (TypeError, UnicodeDecodeError):
                    # We didn't manage to parse the xml file
                    metadata[item.filename] = 'harmful content'
            for key, value in self._get_zipinfo_meta(item).items():
                metadata[key] = value
        zipin.close()
        return metadata


class LibreOfficeParser(ArchiveBasedAbstractParser):
    mimetypes = {
        'application/vnd.oasis.opendocument.text',
        'application/vnd.oasis.opendocument.spreadsheet',
        'application/vnd.oasis.opendocument.presentation',
        'application/vnd.oasis.opendocument.graphics',
        'application/vnd.oasis.opendocument.chart',
        'application/vnd.oasis.opendocument.formula',
        'application/vnd.oasis.opendocument.image',
    }
    files_to_keep = {
        'META-INF/manifest.xml',
        'content.xml',
        'manifest.rdf',
        'mimetype',
        'settings.xml',
        'styles.xml',
    }
    files_to_omit = set(map(re.compile, {  # type: ignore
        r'^meta\.xml$',
        '^Configurations2/',
        '^Thumbnails/',
    }))

    @staticmethod
    def __remove_revisions(full_path: str) -> bool:
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError:
            return False

        if 'office' not in namespace.keys():  # no revisions in the current file
            return True

        for text in tree.getroot().iterfind('.//office:text', namespace):
            for changes in text.iterfind('.//text:tracked-changes', namespace):
                text.remove(changes)

        tree.write(full_path, xml_declaration=True)

        return True

    def _specific_cleanup(self, full_path: str) -> bool:
        if os.path.basename(full_path) == 'content.xml':
            return self.__remove_revisions(full_path)
        return True

    def get_meta(self) -> Dict[str, str]:
        """
        Yes, I know that parsing xml with regexp ain't pretty,
        be my guest and fix it if you want.
        """
        metadata = {}
        zipin = zipfile.ZipFile(self.filename)
        for item in zipin.infolist():
            if item.filename == 'meta.xml':
                try:
                    content = zipin.read(item).decode('utf-8')
                    results = re.findall(r"<((?:meta|dc|cp).+?)>(.+)</\1>", content, re.I | re.M)
                    for (key, value) in results:
                        metadata[key] = value
                except (TypeError, UnicodeDecodeError):
                    # We didn't manage to parse the xml file
                    metadata[item.filename] = 'harmful content'
            for key, value in self._get_zipinfo_meta(item).items():
                metadata[key] = value
        zipin.close()
        return metadata
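

# Example usage (a minimal, hypothetical sketch; 'report.docx' is not a file
# shipped with the project). Both parsers share the same surface: get_meta()
# returns a dict of everything found, and remove_all() writes a cleaned copy
# to self.output_filename, which is set up by the abstract parser.
#
#     parser = MSOfficeParser('report.docx')  # raises ValueError if it isn't a valid zip
#     print(parser.get_meta())                # metadata dict; keys depend on the document
#     if parser.remove_all():
#         print('cleaned file written to', parser.output_filename)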