#!/usr/bin/env python3
"""Copyright Joel Schaerer 2008-2012
This file is part of autojump
autojump is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
autojump is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with autojump. If not, see <http://www.gnu.org/licenses/>.
Autojump is a small tool that maintains a database of your most
used directories, and finds the best match to help you jump to
frequently used places."""
from __future__ import division, print_function
import argparse
from sys import argv, stderr, version_info, exit, getfilesystemencoding
from tempfile import NamedTemporaryFile
from operator import itemgetter
import os
import shutil
AUTOJUMP_VERSION = "release-v20"
MAX_KEYWEIGHT = 1000
MAX_STORED_PATHS = 600
COMPLETION_SEPARATOR = '__'

# Database location: $AUTOJUMP_DATA_DIR overrides; otherwise use the XDG
# data directory (XDG_DATA_HOME, defaulting to ~/.local/share).
CONFIG_DIR = os.environ.get("AUTOJUMP_DATA_DIR")
if CONFIG_DIR is None:
    xdg_data_dir = os.environ.get('XDG_DATA_HOME') or os.path.join(os.environ['HOME'], '.local', 'share')
    CONFIG_DIR = os.path.join(xdg_data_dir, 'autojump')
def uniqadd(collection, key):
    """Append *key* to *collection* unless it is already present."""
    if key in collection:
        return
    collection.append(key)
def dicadd(dic, key, increment=1):
    """Add *increment* to dic[key], treating a missing key as 0.0."""
    try:
        dic[key] += increment
    except KeyError:
        dic[key] = 0. + increment
def output(unicode_text, encoding=None):
    """Print *unicode_text*, encoding it on python2 with the filesystem
    encoding by default to minimize mismatch problems in directory names."""
    if version_info[0] <= 2:
        if encoding is None:
            encoding = getfilesystemencoding()
        print(unicode_text.encode(encoding))
    else:
        print(unicode_text)
def decode(text, encoding=None, errors="strict"):
    """Decode *text* to unicode on python2 (which does not default to
    unicode); a no-op on python3. Uses the filesystem encoding by default."""
    if version_info[0] <= 2:
        if encoding is None:
            encoding = getfilesystemencoding()
        return text.decode(encoding, errors)
    return text
def unico(text):
    """Return *text* as a unicode object on python2; a no-op on python3."""
    return text if version_info[0] > 2 else unicode(text)
def save(path_dict, dic_file):
    """Save the database atomically (write a temp file, fsync it, then
    move it over *dic_file*) and refresh a daily backup copy.

    Silently skips saving when the database file exists, the OS tracks
    ownership, and the file belongs to another user.
    """
    # If the dic_file exists and os supports permissions, check that dic_file belongs to us
    # Otherwise, fail quietly
    if (not os.path.exists(dic_file)) or os.name == 'nt' or os.getuid() == os.stat(dic_file).st_uid:
        temp = NamedTemporaryFile(dir=CONFIG_DIR, delete=False)
        # highest-weighted paths first; the db is stored in utf-8
        for path, weight in sorted(path_dict.items(), key=itemgetter(1), reverse=True):
            temp.write((unico("%s\t%s\n") % (weight, path)).encode("utf-8"))
        # Catching disk errors and skipping save since file handle can't be closed.
        try:
            # cf. http://thunk.org/tytso/blog/2009/03/15/dont-fear-the-fsync/
            temp.flush()
            os.fsync(temp)
            temp.close()
        except IOError as ex:
            # BUGFIX: the original format string had no %s placeholder, so
            # "..." % ex raised a TypeError inside this error handler.
            print("Error while saving autojump database (disk full?) (%s)" %
                  ex, file=stderr)
            return
        # Use shutil.move instead of os.rename because windows doesn't support
        # using rename to overwrite files
        shutil.move(temp.name, dic_file)
        try:  # refresh the backup file at most once per day
            import time
            if (not os.path.exists(dic_file + ".bak") or
                    time.time() - os.path.getmtime(dic_file + ".bak") > 86400):
                shutil.copy(dic_file, dic_file + ".bak")
        except OSError as ex:
            print("Error while creating backup autojump file. (%s)" %
                  ex, file=stderr)
def open_dic(dic_file, error_recovery=False):
    """Try hard to open the database file, recovering from backup if
    needed; falls back to migrating the old pickled database, and
    returns an empty dict when everything fails."""
    try:
        path_dict = {}
        with open(dic_file, 'r') as aj_file:
            for l in aj_file.readlines():
                weight, path = l[:-1].split("\t", 1)
                # the db is stored in utf-8
                path = decode(path, "utf-8")
                path_dict[path] = float(weight)
        return path_dict
    except (IOError, EOFError):
        if not error_recovery and os.path.exists(dic_file + ".bak"):
            print('Problem with autojump database,\
 trying to recover from backup...', file=stderr)
            shutil.copy(dic_file + ".bak", dic_file)
            return open_dic(dic_file, True)
        else:
            # Temporary migration code for the old pickled database format
            old_dic_file = get_dic_file("autojump_py")
            if os.path.exists(old_dic_file):
                try:  # fix to get optimised pickle in python < 3
                    import cPickle as pickle
                except ImportError:
                    import pickle
                try:
                    with open(old_dic_file, 'rb') as aj_file:
                        if version_info[0] > 2:
                            # encoding is only specified for python2.x compatibility
                            path_dict = pickle.load(aj_file, encoding="utf-8")
                        else:
                            path_dict = pickle.load(aj_file)
                    unicode_dict = {}  # we now use unicode internally
                    # BUGFIX: removed a stray debug print(k) here; autojump's
                    # stdout is evaluated by the shell wrapper, so extra output
                    # during migration broke jumping.
                    for k, v in path_dict.items():
                        unicode_dict[decode(k, errors="replace")] = v
                    return unicode_dict
                except (IOError, EOFError, pickle.UnpicklingError):
                    pass
            return {}  # if everything fails, return an empty file
def forget(path_dict, dic_file):
    """Gradually decay all directory weights once their total exceeds
    MAX_KEYWEIGHT. Only call from the actual jump since it can take time."""
    total_weight = sum(path_dict.values())
    if total_weight <= MAX_KEYWEIGHT:
        return
    # scale everything down so the total lands at 90% of the cap
    factor = 0.9 * MAX_KEYWEIGHT / total_weight
    for directory in path_dict.keys():
        path_dict[directory] *= factor
    save(path_dict, dic_file)
def clean_dict(sorted_dirs, path_dict):
    """Limit the size of path_dict to MAX_STORED_PATHS entries.
    Returns True if keys were deleted."""
    if len(sorted_dirs) <= MAX_STORED_PATHS:
        return False
    # remove 25 more than needed, to avoid doing it every time
    for path, _weight in sorted_dirs[MAX_STORED_PATHS - 25:]:
        del path_dict[path]
    return True
def match(path, pattern, ignore_case=False, only_end=False):
    """Check whether *path* matches *pattern*.

    Returns (does_match, remaining_path); the matched prefix is "eaten"
    so two patterns cannot match the same part of the string.
    """
    if only_end:
        # only consider the last (1 + slashes-in-pattern) path components
        candidate = "/".join(path.split('/')[-1 - pattern.count('/'):])
    else:
        candidate = path
    if ignore_case:
        idx = candidate.lower().find(pattern.lower())
    else:
        idx = candidate.find(pattern)
    if idx == -1:
        return (False, path)
    return (True, path[idx + len(pattern):])
def find_matches(dirs, patterns, result_list, ignore_case, max_matches, current_dir):
    """Collect up to *max_matches* existing paths from *dirs* that match
    every pattern in *patterns*, appending them to *result_list*."""
    last_idx = len(patterns) - 1
    for path, _weight in dirs:
        # Don't jump to where we already are
        if path == current_dir:
            continue
        matched, remainder = True, path
        for idx, pat in enumerate(patterns):
            # the final pattern only has to match the end of the path
            matched, remainder = match(remainder, pat, ignore_case,
                                       only_end=(idx == last_idx))
            if not matched:
                break
        # If a path doesn't exist, don't jump there; we still keep it in
        # the db in case it's from a removable drive
        if matched and os.path.exists(path):
            uniqadd(result_list, path)
            if len(result_list) >= max_matches:
                break
def get_dic_file(filename="autojump.txt"):
    """Return the database file path, dot-prefixed (hidden) when the
    config directory is the user's home directory."""
    prefix = "." if CONFIG_DIR == os.path.expanduser("~") else ""
    return CONFIG_DIR + "/" + prefix + filename
def shell_utility():
    """Run this when autojump is called as a shell utility.

    Parses the command line, maintains the database (--add / --stat),
    or finds and prints the best matching directory for the given
    patterns. Returns False when no match was found, True otherwise.
    """
    parser = argparse.ArgumentParser(description='Automatically jump to directory passed as an argument.',
                                     epilog="Please see autojump(1) man pages for full documentation.")
    parser.add_argument('directory', metavar='DIR', nargs='*', default='',
                        help='directory to jump to')
    parser.add_argument('-a', '--add', metavar='DIR',
                        help='manually add path to database')
    parser.add_argument('-b', '--bash', action="store_true", default=False,
                        help='enclose directory quotes to prevent errors')
    # BUGFIX: the help text said 'prevent key weight decay over time', which
    # only describes a side effect; --completion is the tab-completion mode.
    parser.add_argument('--completion', action="store_true", default=False,
                        help='used for tab completion')
    parser.add_argument('--stat', action="store_true", default=False,
                        help='show database entries and their key weights')
    parser.add_argument('--version', action="version", version="%(prog)s " + AUTOJUMP_VERSION,
                        help='show version information and exit')
    args = parser.parse_args()
    dic_file = get_dic_file()
    path_dict = open_dic(dic_file)
    if (args.add):
        # The home dir can be reached quickly by "cd" and may interfere with
        # other directories, so it is never added to the database
        if(args.add != os.path.expanduser("~")):
            dicadd(path_dict, decode(args.add))
            save(path_dict, dic_file)
        return True
    if (args.stat):
        paths = list(path_dict.items())
        paths.sort(key=itemgetter(1))
        # show the 100 highest-weighted entries, lowest first
        for path, count in paths[-100:]:
            output(unico("%.1f:\t%s") % (count, path))
        print("Total key weight: %d. Number of stored paths: %d" %
              (sum(path_dict.values()), len(paths)))
        return True
    import re
    # userchoice is i if the pattern is __pattern__i, otherwise -1
    userchoice = -1
    results = []
    # default: gradually forget about old directories
    if (not args.completion):
        forget(path_dict, dic_file)
    if (args.directory == ''):
        patterns = [unico("")]
    else:
        patterns = [decode(a) for a in args.directory]
    # If the last pattern contains a full path, jump there
    # The regexp is because we need to support stuff like
    # "j wo jo__3__/home/joel/workspace/joel" for zsh
    last_pattern_path = re.sub("(.*)" + COMPLETION_SEPARATOR, "", patterns[-1])
    if (len(last_pattern_path) > 0 and
            last_pattern_path[0] == "/" and
            os.path.exists(last_pattern_path)):
        if not args.completion:
            output(last_pattern_path)
    else:
        # check for ongoing completion, and act accordingly
        endmatch = re.search(COMPLETION_SEPARATOR + "([0-9]+)", patterns[-1])
        if endmatch:  # user has selected a completion
            userchoice = int(endmatch.group(1))
            patterns[-1] = re.sub(COMPLETION_SEPARATOR + "[0-9]+.*",
                                  "", patterns[-1])
        else:  # user hasn't selected a completion, display the same choices again
            endmatch = re.match("(.*)" + COMPLETION_SEPARATOR, patterns[-1])
            if endmatch:
                patterns[-1] = endmatch.group(1)
        dirs = list(path_dict.items())
        dirs.sort(key=itemgetter(1), reverse=True)
        if args.completion or userchoice != -1:
            max_matches = 9
        else:
            max_matches = 1
        # Don't jump to the current directory
        try:
            current_dir = decode(os.path.realpath(os.curdir))
        # Sometimes the current path doesn't exist anymore.
        # In that case, jump if possible.
        except OSError:
            current_dir = None
        find_matches(dirs, patterns, results, False, max_matches, current_dir)
        # If not found, try ignoring case.
        # On completion always show all results
        if args.completion or not results:
            find_matches(dirs, patterns, results,
                         ignore_case=True,
                         max_matches=max_matches, current_dir=current_dir)
        # Keep the database to a reasonable size
        if not args.completion and clean_dict(dirs, path_dict):
            save(path_dict, dic_file)
        if args.completion and args.bash:
            quotes = "'"
        else:
            quotes = ""
        if userchoice != -1:
            if len(results) > userchoice - 1:
                output(unico("%s%s%s") % (quotes, results[userchoice - 1], quotes))
        elif len(results) > 1 and args.completion:
            # emit "pattern__i__path" lines so the shell can re-present choices
            output("\n".join(("%s%s%d%s%s" % (patterns[-1],
                   COMPLETION_SEPARATOR, n + 1, COMPLETION_SEPARATOR, r)
                   for n, r in enumerate(results[:8]))))
        elif results:
            output(unico("%s%s%s") % (quotes, results[0], quotes))
        else:
            return False
    return True
if __name__ == "__main__":
    # exit with a non-zero status when no jump target was found
    if not shell_utility():
        exit(1)