#!/usr/bin/env perl
######################################################################
#
# This script finds duplicate #include directives in C source files
# (ignoring #ifdef's, etc.), and (at your command) removes the duplicates.
#
# It is meant to be run ONLY by FreeRADIUS developers, and has nothing
# whatsoever to do with RADIUS, FreeRADIUS, or configuring a RADIUS server.
#
######################################################################
#
# Run as: ./min-includes.pl `find . -name "*.c" -print`
#             - prints out duplicate includes from files.
#
#         ./min-includes.pl +n `find . -name "*.c" -print`
#             - removes the duplicate includes from each file.
#               Remember to check that it still builds!
#
# It has to be run from the TOP of the FreeRADIUS build tree,
# i.e. where the top-level "configure" script is located.
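#
# In report mode, the output looks like (illustrative file names):
#
#     src/foo/bar.c
#     [12] foo.h (bar.h at 10)
#
# i.e. the "#include" of foo.h on line 12 is already pulled in, directly
# or indirectly, by the "#include" of bar.h on line 10.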
#
######################################################################
#
# FIXME: We don't handle include files taken from the current
# directory...
#
# FIXME: we should take -I <path> from the command line.
#
######################################################################
#
# Copyright (C) 2006 Alan DeKok <aland@freeradius.org>
#
# $Id: 37044edbe80bf605a0ea00c94aba8ed11e683e87 $
#
######################################################################
my %processed;
$any_dups = 0;
$debug = 0;
#
# Find the #include's for one file.
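#
# It fills in two global hashes:
#
#     $refs{$file}{$include} - line number where $include appears in $file
#     $include{$include}     - number of times $include was seen anywhere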
#
sub process($) {
    my $file = shift;

    return if ($processed{$file});
    $processed{$file}++;

    open FILE, "<$file" or die "Failed to open $file: $!\n";

    $line = 0;
    while (<FILE>) {
        $line++;

        next if (!/^\s*\#\s*include\s+/);

        if (/^\s*\#\s*include\s+"(.+?)"/) {
            $refs{$file}{$1} = $line;

            # FIXME: local header files?
            #    src/foo/bar.c: #include "foo.h"
            #    src/foo/foo.h  do stuff..
            $include{$1}++;
        } elsif (/^\s*\#\s*include\s+<(.+?)>/) {
            $refs{$file}{$1} = $line;
            $include{$1}++;
        }
    }

    close FILE;
}
#
# Where include files are located.
#
# FIXME:
#
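# These are relative to the top of the build tree, which is why the
# script has to be run from there.
#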
@directories = ("src/lib", "src");
$do_it = 0;
#
# Horrid.
#
if ($ARGV[0] eq "+n") {
    shift;
    $do_it = 1;
}
#
# Bootstrap the basic C files.
#
foreach $file (@ARGV) {
    process($file);
}
#
# Process the include files referenced from the C files, to find out
# what they include.  Note that we create a temporary array, rather
# than walking over %include, because the process() function adds
# entries to the %include hash.
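#
# The loop below also fills in %forward and %reverse, which map an
# include name (as written in the source) to its path on disk, and back.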
#
@work = sort keys %include;
foreach $inc (@work) {
    foreach $dir (@directories) {
        $path = $dir . "/" . $inc;

        # normalize path
        $path =~ s:/.*?/\.\.::;
        $path =~ s:/.*?/\.\.::;

        next if (! -e $path);

        process($path);
        $forward{$inc} = $path;
        $reverse{$path} = $inc;

        # ignore system include files
        next if ((scalar keys %{$refs{$path}}) == 0);

        # Remember that X includes Y, and push Y onto the list
        # of files to scan.
        foreach $inc2 (sort keys %{$refs{$path}}) {
            $maps{$inc}{$inc2} = 0;
            push @work, $inc2;
        }
    }
}
#
# Process all of the forward refs, so that we have a complete
# list of who's referencing whom.
#
# This doesn't find the shortest path from A to B, but it does
# find one path.
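#
# e.g. if a.h includes b.h, and b.h includes c.h, then after this pass
# $maps{"a.h"}{"c.h"} is set, even though a.h never includes c.h directly.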
#
foreach $inc (sort keys %maps) {
    foreach $inc2 (sort keys %{$maps{$inc}}) {
        foreach $inc3 (sort keys %{$maps{$inc2}}) {
            # map is already there...
            next if (defined $maps{$inc}{$inc3});

            $maps{$inc}{$inc3} = $maps{$inc2}{$inc3} + 1;
        }
    }
}
#
# Walk through the files again, looking for includes that are
# unnecessary. Note that we process header files, too.
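#
# e.g. if bar.c includes a.h on line 10 and b.h on line 12, and a.h
# (directly, or via the map built above) includes b.h, then the include
# of b.h on line 12 is a duplicate.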
#
foreach $file (sort keys %refs) {
    # print out some debugging information.
    if ($debug > 0) {
        if (defined $reverse{$file}) {
            print $file, "\t(", $reverse{$file}, ")\n";
        } else {
            print $file, "\n";
        }
    }

    # walk over the list of includes in this file
    foreach $ref (sort keys %{$refs{$file}}) {

        # walk over the include files we include, or that are
        # included by files that we include.
        foreach $inc2 (sort keys %{$maps{$ref}}) {
            #
            # If we include X, and X includes Y, and we include
            # Y ourselves *after* X, it's a definite dupe.
            #
            # Note that this is a *guaranteed* duplicate.
            #
            # Sometimes order matters, so we can't always delete X
            # if we include Y after X, and Y includes X.
            #
            if (defined $refs{$file}{$inc2} &&
                ($refs{$file}{$inc2} > $refs{$file}{$ref})) {
                $duplicate{$file}{$inc2} = $ref;

                # mark the line to be deleted.
                $delete_line{$file}{$refs{$file}{$inc2}}++;
                $any_dups++;
            }
        }

        print "\t", $ref, "\n" if ($debug > 0);
    }
}
if ($debug > 0) {
    print "------------------------------------\n";
}
#
# Maybe just print out the dups so that a person can validate them.
#
if (!$do_it) {
    foreach $file (sort keys %duplicate) {
        print $file, "\n";

        foreach $inc (sort keys %{$duplicate{$file}}) {
            print "\t[", $refs{$file}{$inc}, "] ", $inc,
                " (", $duplicate{$file}{$inc}, " at ",
                $refs{$file}{$duplicate{$file}{$inc}}, ")\n";
        }
    }
} else {
    foreach $file (sort keys %duplicate) {
        open FILE, "<$file" or die "Failed to open $file: $!\n";
        open OUTPUT, ">$file.tmp" or die "Failed to create $file.tmp: $!\n";

        $line = 0;
        while (<FILE>) {
            $line++;

            # this line was marked as a duplicate: don't copy it
            # to the output.
            next if (defined $delete_line{$file}{$line});

            print OUTPUT;
        }

        # close both handles (flushing OUTPUT) before replacing
        # the original file.
        close FILE;
        close OUTPUT or die "Failed to write $file.tmp: $!\n";

        rename "$file.tmp", $file;
    }
}
# If we succeeded in re-writing the files, it's OK.
exit 0 if ($do_it);
# If there are no duplicates, then we're OK.
exit 0 if (!$any_dups);
# Else there are duplicates, complain.
exit 1;