import random
import uuid
import logging
import os
import re
import zipfile
from typing import Pattern, Any, Tuple, Dict

import xml.etree.ElementTree as ET  # type: ignore

from .archive import ZipParser

# pylint: disable=line-too-long


def _parse_xml(full_path: str) -> Tuple[ET.ElementTree, Dict[str, str]]:
    """ This function parses XML, with namespace support. """
    namespace_map = dict()
    for _, (key, value) in ET.iterparse(full_path, ("start-ns", )):
        # The ns[0-9]+ namespaces are reserved for internal usage, so
        # we have to use another nomenclature.
        if re.match('^ns[0-9]+$', key, re.I):  # pragma: no cover
            key = 'mat' + key[2:]

        namespace_map[key] = value
        ET.register_namespace(key, value)

    return ET.parse(full_path), namespace_map
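
# Usage sketch (the path is illustrative, not part of the library's API):
#   tree, ns = _parse_xml('/tmp/extracted/word/document.xml')
#   tree.find('.//w:del', ns)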


def _sort_xml_attributes(full_path: str) -> bool:
    """ Sort xml attributes lexicographically,
    because it's possible to fingerprint producers (MS Office, LibreOffice, …)
    since they all use different orders.
    """
    tree = ET.parse(full_path)

    for c in tree.getroot():
        c[:] = sorted(c, key=lambda child: (child.tag, child.get('desc')))

    tree.write(full_path, xml_declaration=True, encoding='utf-8')
    return True


class MSOfficeParser(ZipParser):
    """
    The methods modifying XML documents usually do so in two loops:
    1. finding the tags/attributes to remove;
    2. actually editing the document;
    since it's tricky to modify the XML while iterating over it.
    """
    mimetypes = {
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        'application/vnd.openxmlformats-officedocument.presentationml.presentation'
    }
    content_types_to_keep = {
        'application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml',  # /word/endnotes.xml
        'application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml',  # /word/footnotes.xml
        'application/vnd.openxmlformats-officedocument.extended-properties+xml',  # /docProps/app.xml
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml',  # /word/document.xml
        'application/vnd.openxmlformats-officedocument.wordprocessingml.fontTable+xml',  # /word/fontTable.xml
        'application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml',  # /word/footer.xml
        'application/vnd.openxmlformats-officedocument.wordprocessingml.header+xml',  # /word/header.xml
        'application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml',  # /word/styles.xml
        'application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml',  # /word/numbering.xml (used for bullet point formatting)
        'application/vnd.openxmlformats-officedocument.theme+xml',  # /word/theme/theme[0-9].xml (used for font and background coloring, etc.)
        'application/vnd.openxmlformats-package.core-properties+xml',  # /docProps/core.xml

        # for more complicated powerpoints
        'application/vnd.openxmlformats-officedocument.presentationml.notesSlide+xml',
        'application/vnd.openxmlformats-officedocument.presentationml.notesMaster+xml',
        'application/vnd.openxmlformats-officedocument.presentationml.handoutMaster+xml',
        'application/vnd.openxmlformats-officedocument.drawingml.diagramData+xml',
        'application/vnd.openxmlformats-officedocument.drawingml.diagramLayout+xml',
        'application/vnd.openxmlformats-officedocument.drawingml.diagramStyle+xml',
        'application/vnd.openxmlformats-officedocument.drawingml.diagramColors+xml',
        'application/vnd.ms-office.drawingml.diagramDrawing+xml',

        # Do we want to keep the following ones?
        'application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml',
    }

    def __init__(self, filename):
        super().__init__(filename)

        # MSOffice documents use various counters for cross-references;
        # we collect them all, to make sure that they're effectively counters,
        # and not unique ids used for fingerprinting.
        self.__counters = {
            'cNvPr': set(),
            'rid': set(),
        }

        self.files_to_keep = set(map(re.compile, {  # type: ignore
            r'^\[Content_Types\]\.xml$',
            r'^_rels/\.rels$',
            r'^xl/sharedStrings\.xml$',  # https://docs.microsoft.com/en-us/office/open-xml/working-with-the-shared-string-table
            r'^xl/calcChain\.xml$',
            r'^(?:word|ppt|xl)/_rels/(document|workbook|presentation)\.xml\.rels$',
            r'^(?:word|ppt|xl)/_rels/footer[0-9]*\.xml\.rels$',
            r'^(?:word|ppt|xl)/_rels/header[0-9]*\.xml\.rels$',
            r'^(?:word|ppt|xl)/charts/_rels/chart[0-9]+\.xml\.rels$',
            r'^(?:word|ppt|xl)/charts/colors[0-9]+\.xml$',
            r'^(?:word|ppt|xl)/charts/style[0-9]+\.xml$',
            r'^(?:word|ppt|xl)/drawings/_rels/drawing[0-9]+\.xml\.rels$',
            r'^(?:word|ppt|xl)/styles\.xml$',
            # TODO: randomize axId ( https://docs.microsoft.com/en-us/openspecs/office_standards/ms-oi29500/089f849f-fcd6-4fa0-a281-35aa6a432a16 )
            r'^(?:word|ppt|xl)/charts/chart[0-9]*\.xml$',
            r'^xl/workbook\.xml$',
            r'^xl/worksheets/sheet[0-9]+\.xml$',
            r'^ppt/slideLayouts/_rels/slideLayout[0-9]+\.xml\.rels$',
            r'^ppt/slideLayouts/slideLayout[0-9]+\.xml$',
            r'^(?:word|ppt|xl)/tableStyles\.xml$',
            r'^(?:word|ppt|xl)/tables/table[0-9]+\.xml$',
            r'^ppt/slides/_rels/slide[0-9]*\.xml\.rels$',
            r'^ppt/slides/slide[0-9]*\.xml$',
            # https://msdn.microsoft.com/en-us/library/dd908153(v=office.12).aspx
            r'^(?:word|ppt|xl)/stylesWithEffects\.xml$',
            r'^ppt/presentation\.xml$',
            # TODO: check if p:bgRef can be randomized
            r'^ppt/slideMasters/slideMaster[0-9]+\.xml',
            r'^ppt/slideMasters/_rels/slideMaster[0-9]+\.xml\.rels',
            r'^xl/worksheets/_rels/sheet[0-9]+\.xml\.rels',
            r'^(?:word|ppt|xl)/drawings/vmlDrawing[0-9]+\.vml',
            r'^(?:word|ppt|xl)/drawings/drawing[0-9]+\.xml',
            r'^(?:word|ppt|xl)/embeddings/Microsoft_Excel_Worksheet[0-9]+\.xlsx',
            # rels for complicated powerpoints
            r'^ppt/notesSlides/_rels/notesSlide[0-9]+\.xml\.rels',
            r'^ppt/notesMasters/_rels/notesMaster[0-9]+\.xml\.rels',
            r'^ppt/handoutMasters/_rels/handoutMaster[0-9]+\.xml\.rels',
        }))
        self.files_to_omit = set(map(re.compile, {  # type: ignore
            r'^\[trash\]/',
            r'^customXml/',
            r'webSettings\.xml$',
            r'^docProps/custom\.xml$',
            r'^(?:word|ppt|xl)/printerSettings/',
            r'^(?:word|ppt|xl)/theme',
            r'^(?:word|ppt|xl)/people\.xml$',
            r'^(?:word|ppt|xl)/persons/person\.xml$',
            r'^(?:word|ppt|xl)/numbering\.xml$',
            r'^(?:word|ppt|xl)/tags/',
            r'^(?:word|ppt|xl)/glossary/',
            # View properties like view mode, last viewed slide etc
            r'^(?:word|ppt|xl)/viewProps\.xml$',
            # Additional presentation-wide properties like printing properties,
            # presentation show properties etc.
            r'^(?:word|ppt|xl)/presProps\.xml$',
            r'^(?:word|ppt|xl)/comments[0-9]*\.xml$',
            r'^(?:word|ppt|xl)/threadedComments/threadedComment[0-9]*\.xml$',
            r'^(?:word|ppt|xl)/commentsExtended\.xml$',
            r'^(?:word|ppt|xl)/commentsExtensible\.xml$',
            r'^(?:word|ppt|xl)/commentsIds\.xml$',
            # we have an allowlist in self.files_to_keep,
            # so we can trash everything else
            r'^(?:word|ppt|xl)/_rels/',
            r'docMetadata/LabelInfo\.xml$',
        }))

        if self.__fill_files_to_keep_via_content_types() is False:
            raise ValueError

    def __fill_files_to_keep_via_content_types(self) -> bool:
        """ There is a super-handy `[Content_Types].xml` file
        in MS Office archives, describing what every other file contains.
        The self.content_types_to_keep member contains a type allowlist,
        so we're using it to fill the self.files_to_keep one.
        """
        with zipfile.ZipFile(self.filename) as zin:
            if '[Content_Types].xml' not in zin.namelist():
                return False
            xml_data = zin.read('[Content_Types].xml')

        self.content_types: Dict[str, str] = dict()
        try:
            tree = ET.fromstring(xml_data)
        except ET.ParseError:
            return False
        for c in tree:
            if 'PartName' not in c.attrib or 'ContentType' not in c.attrib:  # pragma: no cover
                continue
            elif c.attrib['ContentType'] in self.content_types_to_keep:
                fname = c.attrib['PartName'][1:]  # remove leading `/`
                re_fname = re.compile('^' + re.escape(fname) + '$')
                self.files_to_keep.add(re_fname)  # type: ignore
        return True

    @staticmethod
    def __remove_rsid(full_path: str) -> bool:
        """ This method removes "revision session IDs". We're matching the
        '}rsid' substring instead of doing proper parsing, since rsid can take
        multiple forms, like `rsidRDefault`, `rsidR`, `rsids`, …

        For more details, see
        - https://msdn.microsoft.com/en-us/library/office/documentformat.openxml.wordprocessing.previoussectionproperties.rsidrpr.aspx
        - https://blogs.msdn.microsoft.com/brian_jones/2006/12/11/whats-up-with-all-those-rsids/
        """
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:  # pragma: no cover
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        # rsid tags and attributes are always under the `w` namespace
        if 'w' not in namespace:
            return True

        parent_map = {c: p for p in tree.iter() for c in p}

        elements_to_remove = list()
        for item in tree.iterfind('.//', namespace):
            if '}rsid' in item.tag.strip().lower():  # rsid as tag
                elements_to_remove.append(item)
                continue
            for key in list(item.attrib.keys()):  # rsid as attribute
                if '}rsid' in key.lower():
                    del item.attrib[key]

        for element in elements_to_remove:
            parent_map[element].remove(element)

        tree.write(full_path, xml_declaration=True, encoding='utf-8')
        return True

    @staticmethod
    def __remove_nsid(full_path: str) -> bool:
        """
        nsid are random identifiers that can be used to ease the merging of
        some components of a document. They can also be used for
        fingerprinting.

        See the spec for more details: https://docs.microsoft.com/en-us/dotnet/api/documentformat.openxml.wordprocessing.nsid?view=openxml-2.8.1
        """
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:  # pragma: no cover
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        # The nsid tag is always under the `w` namespace
        if 'w' not in namespace:
            return True

        parent_map = {c: p for p in tree.iter() for c in p}

        elements_to_remove = list()
        for element in tree.iterfind('.//w:nsid', namespace):
            elements_to_remove.append(element)
        for element in elements_to_remove:
            parent_map[element].remove(element)

        tree.write(full_path, xml_declaration=True, encoding='utf-8')
        return True

    @staticmethod
    def __remove_revisions(full_path: str) -> bool:
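        """ Remove the document's tracked changes: `w:del` elements are
        dropped entirely, while the children of `w:ins` elements are hoisted
        back into their parent before the `w:ins` wrapper itself is removed.
        """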
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:  # pragma: no cover
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        # Revisions are either deletions (`w:del`) or
        # insertions (`w:ins`)
        del_presence = tree.find('.//w:del', namespace)
        ins_presence = tree.find('.//w:ins', namespace)
        if del_presence is None and ins_presence is None:
            return True  # No revisions are present

        parent_map = {c: p for p in tree.iter() for c in p}

        elements_del = list()
        for element in tree.iterfind('.//w:del', namespace):
            elements_del.append(element)
        for element in elements_del:
            parent_map[element].remove(element)

        elements_ins = list()
        for element in tree.iterfind('.//w:ins', namespace):
            for position, item in enumerate(tree.iter()):  # pragma: no cover
                if item == element:
                    for children in element.iterfind('./*'):
                        elements_ins.append((element, position, children))
                    break

        for (element, position, children) in elements_ins:
            parent_map[element].insert(position, children)

        # the list can sometimes contain duplicate elements, so don't remove
        # until all children have been processed
        for (element, position, children) in elements_ins:
            if element in parent_map[element]:
                parent_map[element].remove(element)

        tree.write(full_path, xml_declaration=True, encoding='utf-8')
        return True

    @staticmethod
    def __remove_document_comment_meta(full_path: str) -> bool:
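        """ Remove the comment anchors (`w:commentRangeStart`,
        `w:commentRangeEnd` and `w:commentReference`) from the document,
        since the comments themselves are dropped along with comments.xml.
        """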
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:  # pragma: no cover
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        # search the docs to see if we can bail early
        range_start = tree.find('.//w:commentRangeStart', namespace)
        range_end = tree.find('.//w:commentRangeEnd', namespace)
        references = tree.find('.//w:commentReference', namespace)
        if range_start is None and range_end is None and references is None:
            return True  # No comment meta tags are present

        parent_map = {c: p for p in tree.iter() for c in p}

        # iterate over the elements and add them to a list
        elements_del = list()
        for element in tree.iterfind('.//w:commentRangeStart', namespace):
            elements_del.append(element)
        for element in tree.iterfind('.//w:commentRangeEnd', namespace):
            elements_del.append(element)
        for element in tree.iterfind('.//w:commentReference', namespace):
            elements_del.append(element)

        # remove the elements
        for element in elements_del:
            parent_map[element].remove(element)

        tree.write(full_path, xml_declaration=True, encoding='utf-8')
        return True

    def __remove_document_xml_rels_members(self, full_path: str) -> bool:
        """ Remove the dangling references from the word/_rels/document.xml.rels
        file, since MS Office doesn't like them.
        """
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:  # pragma: no cover
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        if len(namespace.items()) != 1:  # pragma: no cover
            logging.debug("Got several namespaces for Types: %s", namespace.items())

        removed_fnames = set()
        with zipfile.ZipFile(self.filename) as zin:
            for fname in [item.filename for item in zin.infolist()]:
                for file_to_omit in self.files_to_omit:
                    if file_to_omit.search(fname):
                        matches = map(lambda r: r.search(fname), self.files_to_keep)
                        if any(matches):  # the file is in the allowlist
                            continue
                        removed_fnames.add(fname)
                        break

        root = tree.getroot()
        for item in root.findall('{%s}Relationship' % namespace['']):
            name = 'word/' + item.attrib['Target']  # add the word/ prefix to the path, since all document rels are in the word/ directory
            if name in removed_fnames:
                root.remove(item)

        tree.write(full_path, xml_declaration=True, encoding='utf-8')
        return True

    def __remove_content_type_members(self, full_path: str) -> bool:
        """ This method removes the dangling references
        from the [Content_Types].xml file, since MS Office doesn't like them.
        """
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:  # pragma: no cover
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        if len(namespace.items()) != 1:  # pragma: no cover
            logging.debug("Got several namespaces for Types: %s", namespace.items())

        removed_fnames = set()
        with zipfile.ZipFile(self.filename) as zin:
            for fname in [item.filename for item in zin.infolist()]:
                for file_to_omit in self.files_to_omit:
                    if file_to_omit.search(fname):
                        matches = map(lambda r: r.search(fname), self.files_to_keep)
                        if any(matches):  # the file is in the allowlist
                            continue
                        removed_fnames.add(fname)
                        break

        root = tree.getroot()
        for item in root.findall('{%s}Override' % namespace['']):
            name = item.attrib['PartName'][1:]  # remove the leading '/'
            if name in removed_fnames:
                root.remove(item)

        tree.write(full_path, xml_declaration=True, encoding='utf-8')
        return True

    def _final_checks(self) -> bool:
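        """ Make sure that the collected identifiers behave like counters,
        i.e. form a consecutive sequence starting at 1, in which case the
        highest value equals the number of values; otherwise, they might be
        unique ids usable for fingerprinting.
        """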
        for k, v in self.__counters.items():
            if v and len(v) != max(v):
                # TODO: make this an error and return False
                # once the ability to correct the counters is implemented
                logging.warning("%s contains invalid %s: %s", self.filename, k, v)
                return True
        return True

    def __collect_counters(self, full_path: str):
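        """ Collect the numeric parts of the rId ("relationship Id") and
        cNvPr ("non-visual property") identifiers found in the given file,
        for the sanity checks performed in _final_checks.
        """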
        with open(full_path, encoding='utf-8') as f:
            content = f.read()
            # "relationship Id"
            for i in re.findall(r'(?:\s|r:)[iI][dD]="rId([0-9]+)"(?:\s|/)', content):
                self.__counters['rid'].add(int(i))
            # "connector for Non-visual property"
            for i in re.findall(r'<p:cNvPr id="([0-9]+)"', content):
                self.__counters['cNvPr'].add(int(i))

    @staticmethod
    def __randomize_creationId(full_path: str) -> bool:
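        """ Replace the value of the p14:creationId tag, a persistent
        randomly-generated identifier, with a fresh random number.
        """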
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:  # pragma: no cover
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        if 'p14' not in namespace:
            return True  # pragma: no cover

        for item in tree.iterfind('.//p14:creationId', namespace):
            item.set('val', '%s' % random.randint(0, 2**32))
        tree.write(full_path, xml_declaration=True, encoding='utf-8')
        return True

    @staticmethod
    def __randomize_sldMasterId(full_path: str) -> bool:
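        """ Replace the id of the p:sldMasterId tag, another persistent
        randomly-generated identifier, with a fresh random number.
        """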
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:  # pragma: no cover
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        if 'p' not in namespace:
            return True  # pragma: no cover

        for item in tree.iterfind('.//p:sldMasterId', namespace):
            item.set('id', '%s' % random.randint(0, 2**32))
        tree.write(full_path, xml_declaration=True, encoding='utf-8')
        return True

    def _specific_cleanup(self, full_path: str) -> bool:
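        """ Dispatch the relevant cleanups for a single extracted file,
        depending on its path inside the archive.
        """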
        # pylint: disable=too-many-return-statements,too-many-branches
        if os.stat(full_path).st_size == 0:  # Don't process empty files
            return True

        if not full_path.endswith(('.xml', '.xml.rels')):
            return True

        if self.__randomize_creationId(full_path) is False:
            return False

        self.__collect_counters(full_path)

        if full_path.endswith('/[Content_Types].xml'):
            # this file contains references to files that we might
            # remove, and MS Office doesn't like dangling references
            if self.__remove_content_type_members(full_path) is False:  # pragma: no cover
                return False
        elif full_path.endswith('/word/document.xml'):
            # this file contains the revisions
            if self.__remove_revisions(full_path) is False:
                return False  # pragma: no cover
            # remove comment references and ranges
            if self.__remove_document_comment_meta(full_path) is False:
                return False  # pragma: no cover
        elif full_path.endswith('/word/_rels/document.xml.rels'):
            # similar to the above, but for the document.xml.rels file
            if self.__remove_document_xml_rels_members(full_path) is False:  # pragma: no cover
                return False
        elif full_path.endswith('/docProps/app.xml'):
            # This file must be present and valid,
            # so we're removing as much as we can.
            with open(full_path, 'wb') as f:
                f.write(b'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>')
                f.write(b'<Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties">')
                f.write(b'</Properties>')
        elif full_path.endswith('/docProps/core.xml'):
            # This file must be present and valid,
            # so we're removing as much as we can.
            with open(full_path, 'wb') as f:
                f.write(b'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>')
                f.write(b'<cp:coreProperties xmlns:cp="http://schemas.openxmlformats.org/package/2006/metadata/core-properties">')
                f.write(b'</cp:coreProperties>')
        elif full_path.endswith('/ppt/tableStyles.xml'):  # pragma: no cover
            # This file must be present and valid,
            # so we're removing as much as we can.
            with open(full_path, 'wb') as f:
                f.write(b'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>')
                uid = str(uuid.uuid4()).encode('utf-8')
                f.write(b'<a:tblStyleLst def="{%s}" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"/>' % uid)
        elif full_path.endswith('ppt/presentation.xml'):
            if self.__randomize_sldMasterId(full_path) is False:
                return False  # pragma: no cover

        if self.__remove_rsid(full_path) is False:
            return False  # pragma: no cover

        if self.__remove_nsid(full_path) is False:
            return False  # pragma: no cover

        try:
            _sort_xml_attributes(full_path)
        except ET.ParseError as e:  # pragma: no cover
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        # This is awful, I'm sorry.
        #
        # Microsoft Office isn't happy when we have the `mc:Ignorable`
        # attribute containing namespaces that aren't present in the xml file,
        # so instead of trying to remove this specific attribute with etree,
        # we're removing it with a regexp.
        #
        # Since we're the ones producing this file, via the call to
        # _sort_xml_attributes, there won't be any "funny tricks".
        # Worst case, the attribute isn't present, and everything is fine.
        #
        # see: https://docs.microsoft.com/en-us/dotnet/framework/wpf/advanced/mc-ignorable-attribute
        with open(full_path, 'rb') as f:
            text = f.read()
            out = re.sub(b'mc:Ignorable="[^"]*"', b'', text, 1)
        with open(full_path, 'wb') as f:
            f.write(out)

        return True

    def _specific_get_meta(self, full_path: str, file_path: str) -> Dict[str, Any]:
        """
        Yes, I know that parsing xml with regexp ain't pretty,
        be my guest and fix it if you want.
        """
        if not file_path.startswith('docProps/') or not file_path.endswith('.xml'):
            return {}

        with open(full_path, encoding='utf-8') as f:
            try:
                results = re.findall(r"<(.+)>(.+)</\1>", f.read(), re.I | re.M)
                return {k: v for (k, v) in results}
            except (TypeError, UnicodeDecodeError):
                # We didn't manage to parse the xml file
                return {file_path: 'harmful content', }


class LibreOfficeParser(ZipParser):
    mimetypes = {
        'application/vnd.oasis.opendocument.text',
        'application/vnd.oasis.opendocument.spreadsheet',
        'application/vnd.oasis.opendocument.presentation',
        'application/vnd.oasis.opendocument.graphics',
        'application/vnd.oasis.opendocument.chart',
        'application/vnd.oasis.opendocument.formula',
        'application/vnd.oasis.opendocument.image',
    }

    def __init__(self, filename):
        super().__init__(filename)

        self.files_to_keep = set(map(re.compile, {  # type: ignore
            r'^META-INF/manifest\.xml$',
            r'^content\.xml$',
            r'^manifest\.rdf$',
            r'^mimetype$',
            r'^settings\.xml$',
            r'^styles\.xml$',
        }))
        self.files_to_omit = set(map(re.compile, {  # type: ignore
            r'^meta\.xml$',
            r'^layout-cache$',
            r'^Configurations2/',
            r'^Thumbnails/',
        }))

    @staticmethod
    def __remove_revisions(full_path: str) -> bool:
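        """ Remove the tracked changes (text:tracked-changes elements)
        from the document's office:text bodies.
        """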
        try:
            tree, namespace = _parse_xml(full_path)
        except ET.ParseError as e:
            logging.error("Unable to parse %s: %s", full_path, e)
            return False

        if 'office' not in namespace:  # no revisions in the current file
            return True

        for text in tree.getroot().iterfind('.//office:text', namespace):
            for changes in text.iterfind('.//text:tracked-changes', namespace):
                text.remove(changes)

        tree.write(full_path, xml_declaration=True, encoding='utf-8')
        return True

    def _specific_cleanup(self, full_path: str) -> bool:
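        """ Remove the revisions from content.xml, and sort the attributes
        of every xml file, to avoid fingerprinting.
        """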
        if os.stat(full_path).st_size == 0:  # Don't process empty files
            return True

        if os.path.basename(full_path).endswith('.xml'):
            if os.path.basename(full_path) == 'content.xml':
                if self.__remove_revisions(full_path) is False:
                    return False

            try:
                _sort_xml_attributes(full_path)
            except ET.ParseError as e:
                logging.error("Unable to parse %s: %s", full_path, e)
                return False
        return True

    def _specific_get_meta(self, full_path: str, file_path: str) -> Dict[str, Any]:
        """
        Yes, I know that parsing xml with regexp ain't pretty,
        be my guest and fix it if you want.
        """
        if file_path != 'meta.xml':
            return {}
        with open(full_path, encoding='utf-8') as f:
            try:
                results = re.findall(r"<((?:meta|dc|cp).+?)[^>]*>(.+)</\1>", f.read(), re.I | re.M)
                return {k: v for (k, v) in results}
            except (TypeError, UnicodeDecodeError):
                # We didn't manage to parse the xml file
                return {file_path: 'harmful content', }