#!/usr/bin/env python3
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see
# <http://www.gnu.org/licenses/>.
#
# Check that external references between documentation HTML files are not broken.
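#
# The script collects every anchor defined by the HTML files under the build
# tree together with every link target they reference, and then verifies that
# each internal target resolves to a known anchor.
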
import sys
import os
import argparse
import re
import xml.etree.ElementTree as ET
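
# XHTML namespace map used for the ElementTree queries, and the accumulator
# for absolute (external) URLs found while scanning the files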
ns = {'html': 'http://www.w3.org/1999/xhtml'}
externallinks = []
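

# walks the tree under 'prefix' and returns (full path, relative path) tuples
# for every HTML file; the relative path is anchored at the parent directory
# of 'prefix', and the special 404 page is skipped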
def get_file_list(prefix):
    filelist = []

    for root, dir, files in os.walk(prefix):
        prefixbase = os.path.dirname(prefix)

        if root.startswith(prefixbase):
            relroot = root[len(prefixbase):]
        else:
            relroot = root

        for file in files:
            if not re.search('\\.html$', file):
                continue

            # the 404 page doesn't play well
            if '404.html' in file:
                continue

            fullfilename = os.path.join(root, file)
            relfilename = os.path.join(relroot, file)
            filelist.append((fullfilename, relfilename))

    return filelist


# loads one XHTML file and extracts all anchors it defines as well as the
# local and remote link targets it references
def process_file(filetuple):
    filename, relfilename = filetuple
    tree = ET.parse(filename)
    root = tree.getroot()

    anchors = [relfilename]
    targets = []

    for elem in root.findall('.//html:a', ns):
        target = elem.get('href')
        an = elem.get('id')

        if an:
            anchors.append(relfilename + '#' + an)

        if target:
            if re.search('://', target):
                externallinks.append(target)
            elif target[0] != '#' and 'mailto:' not in target:
                dirname = os.path.dirname(relfilename)
                targetname = os.path.normpath(os.path.join(dirname, target))
                targets.append((targetname, filename, target))

    # older docutils generate "<div class='section'"
    for elem in root.findall('.//html:div/[@class=\'section\']', ns):
        an = elem.get('id')

        if an:
            anchors.append(relfilename + '#' + an)

    # modern docutils generate a <section element
    for elem in root.findall('.//html:section', ns):
        an = elem.get('id')

        if an:
            anchors.append(relfilename + '#' + an)

    return (anchors, targets)
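

# runs process_file() over every file in the list and merges the per-file
# anchors and link targets into two flat lists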
def process_all(filelist):
    anchors = []
    targets = []

    for filetuple in filelist:
        anchor, target = process_file(filetuple)
        targets = targets + target
        anchors = anchors + anchor

    return (targets, anchors)
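

# checks every collected link target against the list of known anchors and
# reports the broken ones; returns True if any broken link was found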
def check_targets(targets, anchors):
    errors = []
    for target, targetfrom, targetorig in targets:
        if target not in anchors:
            errors.append((targetfrom, targetorig))

    if errors:
        errors.sort()

        print('broken link targets:')

        for file, target in errors:
            print(file + " broken link: " + target)

        return True

    return False
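

# command line entry point: by default broken internal references under
# --prefix are reported and the script exits with status 1 if any are found;
# with --external the sorted, deduplicated external URLs are printed instead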
parser = argparse.ArgumentParser(description='HTML reference checker')
parser.add_argument('--prefix', default='.',
                    help='build tree prefix')
parser.add_argument('--external', action="store_true",
                    help='print external references instead')

args = parser.parse_args()

files = get_file_list(args.prefix)

targets, anchors = process_all(files)

if args.external:
    prev = None
    externallinks.sort()

    for ext in externallinks:
        if ext != prev:
            print(ext)

        prev = ext
else:
    if check_targets(targets, anchors):
        sys.exit(1)

    sys.exit(0)