# HG changeset patch
# User Chris Mason
# Date 1145404212 14400
# Node ID f71e9656524f1c88001c0e47404361523cfc999f
# Parent d66278012853e1abd331a57726d26521bcbeb714
# Parent 67a0a38520246c8148c583689840d74adcaeb8c8
merge revlogng with mpm tip

diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -8,6 +8,7 @@ syntax: glob
 *.pyc
 *.swp
 *.prof
+tests/.coverage*
 tests/*.err
 build
 dist
diff --git a/MANIFEST.in b/MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -2,13 +2,13 @@ include hg
 recursive-include mercurial *.py
 include hgweb.cgi hgwebdir.cgi
 include hgeditor rewrite-log
-include tests/README tests/run-tests tests/test-*[a-z0-9] tests/*.out
+include tests/README tests/run-tests tests/md5sum.py tests/test-*[a-z0-9] tests/*.out
 prune tests/*.err
 include *.txt
 include templates/map templates/map-*[a-z0-9]
 include templates/*.tmpl
 include templates/static/*
-include doc/README doc/Makefile doc/*.txt doc/*.html doc/*.[0-9]
+include doc/README doc/Makefile doc/gendoc.py doc/*.txt doc/*.html doc/*.[0-9]
 recursive-include contrib *
 include README
 include CONTRIBUTORS
diff --git a/contrib/hgk b/contrib/hgk
--- a/contrib/hgk
+++ b/contrib/hgk
@@ -3448,7 +3448,7 @@ proc domktag {} {
         return
     }
     if {[catch {
-            set out [exec hg tag $tag $id]
+            set out [exec hg tag -r $id $tag]
         } err]} {
         error_popup "Error creating tag: $err"
         return
diff --git a/mercurial/commands.py b/mercurial/commands.py
--- a/mercurial/commands.py
+++ b/mercurial/commands.py
@@ -2259,7 +2259,7 @@ def recover(ui, repo):
     """
     if repo.recover():
         return repo.verify()
-    return False
+    return 1

 def remove(ui, repo, pat, *pats, **opts):
     """remove the specified files on the next commit
@@ -3259,38 +3259,32 @@ def dispatch(args):
         u = ui.ui()
     except util.Abort, inst:
         sys.stderr.write(_("abort: %s\n") % inst)
-        sys.exit(1)
+        return -1

     external = []
     for x in u.extensions():
-        def on_exception(exc, inst):
-            u.warn(_("*** failed to import extension %s\n") % x[1])
-            u.warn("%s\n" % inst)
-            if "--traceback" in sys.argv[1:]:
-                traceback.print_exc()
-        if x[1]:
-            try:
+        try:
+            if x[1]:
                 mod = imp.load_source(x[0], x[1])
-            except Exception, inst:
-                on_exception(Exception, inst)
-                continue
-        else:
-            def importh(name):
-                mod = __import__(name)
-                components = name.split('.')
-                for comp in components[1:]:
-                    mod = getattr(mod, comp)
-                return mod
-            try:
+            else:
+                def importh(name):
+                    mod = __import__(name)
+                    components = name.split('.')
+                    for comp in components[1:]:
+                        mod = getattr(mod, comp)
+                    return mod
                 try:
                     mod = importh("hgext." + x[0])
                 except ImportError:
                     mod = importh(x[0])
-            except Exception, inst:
-                on_exception(Exception, inst)
-                continue
-
-        external.append(mod)
+            external.append(mod)
+        except Exception, inst:
+            u.warn(_("*** failed to import extension %s: %s\n") % (x[0], inst))
+            if "--traceback" in sys.argv[1:]:
+                traceback.print_exc()
+                return 1
+            continue
+
     for x in external:
         cmdtable = getattr(x, 'cmdtable', {})
         for t in cmdtable:
@@ -3332,14 +3326,11 @@ def dispatch(args):
     repo = path and hg.repository(u, path=path) or None

     if options['help']:
-        help_(u, cmd, options['version'])
-        sys.exit(0)
+        return help_(u, cmd, options['version'])
     elif options['version']:
-        show_version(u)
-        sys.exit(0)
+        return show_version(u)
     elif not cmd:
-        help_(u, 'shortlist')
-        sys.exit(0)
+        return help_(u, 'shortlist')

     if cmd not in norepo.split():
         try:
@@ -3394,15 +3385,12 @@ def dispatch(args):
         else:
             u.warn(_("hg: %s\n") % inst.args[1])
         help_(u, 'shortlist')
-        sys.exit(-1)
     except AmbiguousCommand, inst:
         u.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
                (inst.args[0], " ".join(inst.args[1])))
-        sys.exit(1)
     except UnknownCommand, inst:
         u.warn(_("hg: unknown command '%s'\n") % inst.args[0])
         help_(u, 'shortlist')
-        sys.exit(1)
     except hg.RepoError, inst:
         u.warn(_("abort: "), inst, "!\n")
     except lock.LockHeld, inst:
@@ -3449,7 +3437,6 @@ def dispatch(args):
            u.warn(_("abort: %s\n") % inst.strerror)
     except util.Abort, inst:
         u.warn(_('abort: '), inst.args[0] % inst.args[1:], '\n')
-        sys.exit(1)
     except TypeError, inst:
         # was this an argument error?
         tb = traceback.extract_tb(sys.exc_info()[2])
@@ -3458,9 +3445,10 @@ def dispatch(args):
         u.debug(inst, "\n")
         u.warn(_("%s: invalid arguments\n") % cmd)
         help_(u, cmd)
-    except SystemExit:
-        # don't catch this in the catch-all below
-        raise
+    except SystemExit, inst:
+        # Commands shouldn't sys.exit directly, but give a return code.
+        # Just in case, catch this and pass the exit code to the caller.
+        return inst.code
     except:
         u.warn(_("** unknown exception encountered, details follow\n"))
         u.warn(_("** report bug details to mercurial@selenic.com\n"))
@@ -3468,4 +3456,4 @@
               % version.get_version())
         raise

-    sys.exit(-1)
+    return -1
diff --git a/mercurial/dirstate.py b/mercurial/dirstate.py
--- a/mercurial/dirstate.py
+++ b/mercurial/dirstate.py
@@ -342,7 +342,16 @@ class dirstate(object):
             names.sort()
             # nd is the top of the repository dir tree
             nd = util.normpath(top[len(self.root) + 1:])
-            if nd == '.': nd = ''
+            if nd == '.':
+                nd = ''
+            else:
+                # do not recurse into a repo contained in this
+                # one. use bisect to find .hg directory so speed
+                # is good on big directory.
+                hg = bisect.bisect_left(names, '.hg')
+                if hg < len(names) and names[hg] == '.hg':
+                    if os.path.isdir(os.path.join(top, '.hg')):
+                        continue
             for f in names:
                 np = util.pconvert(os.path.join(nd, f))
                 if seen(np):
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1594,8 +1594,9 @@ class localrepository(object):
                 self.ui.debug(_(" remote %s is newer, get\n") % f)
                 get[f] = m2[f]
                 s = 1
-            elif f in umap:
+            elif f in umap or f in added:
                 # this unknown file is the same as the checkout
+                # we need to reset the dirstate if the file was added
                 get[f] = m2[f]

         if not s and mfw[f] != mf2[f]:
diff --git a/mercurial/util.py b/mercurial/util.py
--- a/mercurial/util.py
+++ b/mercurial/util.py
@@ -71,10 +71,23 @@ def filter(s, cmd):
             return fn(s, cmd[len(name):].lstrip())
     return pipefilter(s, cmd)

+def find_in_path(name, path, default=None):
+    '''find name in search path. path can be string (will be split
+    with os.pathsep), or iterable thing that returns strings. if name
+    found, return path to name. else return default.'''
+    if isinstance(path, str):
+        path = path.split(os.pathsep)
+    for p in path:
+        p_name = os.path.join(p, name)
+        if os.path.exists(p_name):
+            return p_name
+    return default
+
 def patch(strip, patchname, ui):
     """apply the patch to the working directory.
     a list of patched files is returned"""
-    fp = os.popen('patch -p%d < "%s"' % (strip, patchname))
+    patcher = find_in_path('gpatch', os.environ.get('PATH', ''), 'patch')
+    fp = os.popen('"%s" -p%d < "%s"' % (patcher, strip, patchname))
     files = {}
     for line in fp:
         line = line.rstrip()
@@ -373,8 +386,10 @@ def unlink(f):
     """unlink and remove the directory if it is empty"""
     os.unlink(f)
     # try removing directories that might now be empty
-    try: os.removedirs(os.path.dirname(f))
-    except: pass
+    try:
+        os.removedirs(os.path.dirname(f))
+    except OSError:
+        pass

 def copyfiles(src, dst, hardlink=None):
     """Copy a directory tree using hardlinks if possible"""
@@ -530,18 +545,13 @@ if os.name == 'nt':

     sys.stdout = winstdout(sys.stdout)

+    def system_rcpath():
+        return [r'c:\mercurial\mercurial.ini']
+
     def os_rcpath():
         '''return default os-specific hgrc search path'''
-        try:
-            import win32api, win32process
-            proc = win32api.GetCurrentProcess()
-            filename = win32process.GetModuleFileNameEx(proc, 0)
-            systemrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
-        except ImportError:
-            systemrc = r'c:\mercurial\mercurial.ini'
-
-        return [systemrc,
-                os.path.join(os.path.expanduser('~'), 'mercurial.ini')]
+        return system_rcpath() + [os.path.join(os.path.expanduser('~'),
+                                               'mercurial.ini')]

     def parse_patch_output(output_line):
         """parses the output produced by patch and returns the file name"""
@@ -550,43 +560,9 @@ if os.name == 'nt':
             pf = pf[1:-1] # Remove the quotes
         return pf

-    try: # Mark Hammond's win32all package allows better functionality on Windows
-        import win32api, win32con, win32file, pywintypes
-
-        # create hard links using win32file module
-        def os_link(src, dst): # NB will only succeed on NTFS
-            win32file.CreateHardLink(dst, src)
-
-        def nlinks(pathname):
-            """Return number of hardlinks for the given file."""
-            try:
-                fh = win32file.CreateFile(pathname,
-                    win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
-                    None, win32file.OPEN_EXISTING, 0, None)
-                res = win32file.GetFileInformationByHandle(fh)
-                fh.Close()
-                return res[7]
-            except:
-                return os.stat(pathname).st_nlink
-
-        def testpid(pid):
-            '''return True if pid is still running or unable to
-               determine, False
otherwise''' - try: - import win32process, winerror - handle = win32api.OpenProcess( - win32con.PROCESS_QUERY_INFORMATION, False, pid) - if handle: - status = win32process.GetExitCodeProcess(handle) - return status == win32con.STILL_ACTIVE - except pywintypes.error, details: - return details[0] != winerror.ERROR_INVALID_PARAMETER - return True - - except ImportError: - def testpid(pid): - '''return False if pid dead, True if running or not known''' - return True + def testpid(pid): + '''return False if pid dead, True if running or not known''' + return True def is_exec(f, last): return last @@ -612,6 +588,12 @@ if os.name == 'nt': def explain_exit(code): return _("exited with status %d") % code, code + try: + # override functions with win32 versions if possible + from util_win32 import * + except ImportError: + pass + else: nulldev = '/dev/null' diff --git a/mercurial/util_win32.py b/mercurial/util_win32.py new file mode 100644 --- /dev/null +++ b/mercurial/util_win32.py @@ -0,0 +1,171 @@ +# util_win32.py - utility functions that use win32 API +# +# Copyright 2005 Matt Mackall +# Copyright 2006 Vadim Gelfer +# +# This software may be used and distributed according to the terms of +# the GNU General Public License, incorporated herein by reference. + +# Mark Hammond's win32all package allows better functionality on +# Windows. this module overrides definitions in util.py. if not +# available, import of this module will fail, and generic code will be +# used. + +import win32api + +from demandload import * +from i18n import gettext as _ +demandload(globals(), 'errno os pywintypes win32con win32file win32process') +demandload(globals(), 'winerror') + +class WinError(OSError): + winerror_map = { + winerror.ERROR_ACCESS_DENIED: errno.EACCES, + winerror.ERROR_ACCOUNT_DISABLED: errno.EACCES, + winerror.ERROR_ACCOUNT_RESTRICTION: errno.EACCES, + winerror.ERROR_ALREADY_ASSIGNED: errno.EBUSY, + winerror.ERROR_ALREADY_EXISTS: errno.EEXIST, + winerror.ERROR_ARITHMETIC_OVERFLOW: errno.ERANGE, + winerror.ERROR_BAD_COMMAND: errno.EIO, + winerror.ERROR_BAD_DEVICE: errno.ENODEV, + winerror.ERROR_BAD_DRIVER_LEVEL: errno.ENXIO, + winerror.ERROR_BAD_EXE_FORMAT: errno.ENOEXEC, + winerror.ERROR_BAD_FORMAT: errno.ENOEXEC, + winerror.ERROR_BAD_LENGTH: errno.EINVAL, + winerror.ERROR_BAD_PATHNAME: errno.ENOENT, + winerror.ERROR_BAD_PIPE: errno.EPIPE, + winerror.ERROR_BAD_UNIT: errno.ENODEV, + winerror.ERROR_BAD_USERNAME: errno.EINVAL, + winerror.ERROR_BROKEN_PIPE: errno.EPIPE, + winerror.ERROR_BUFFER_OVERFLOW: errno.ENAMETOOLONG, + winerror.ERROR_BUSY: errno.EBUSY, + winerror.ERROR_BUSY_DRIVE: errno.EBUSY, + winerror.ERROR_CALL_NOT_IMPLEMENTED: errno.ENOSYS, + winerror.ERROR_CANNOT_MAKE: errno.EACCES, + winerror.ERROR_CANTOPEN: errno.EIO, + winerror.ERROR_CANTREAD: errno.EIO, + winerror.ERROR_CANTWRITE: errno.EIO, + winerror.ERROR_CRC: errno.EIO, + winerror.ERROR_CURRENT_DIRECTORY: errno.EACCES, + winerror.ERROR_DEVICE_IN_USE: errno.EBUSY, + winerror.ERROR_DEV_NOT_EXIST: errno.ENODEV, + winerror.ERROR_DIRECTORY: errno.EINVAL, + winerror.ERROR_DIR_NOT_EMPTY: errno.ENOTEMPTY, + winerror.ERROR_DISK_CHANGE: errno.EIO, + winerror.ERROR_DISK_FULL: errno.ENOSPC, + winerror.ERROR_DRIVE_LOCKED: errno.EBUSY, + winerror.ERROR_ENVVAR_NOT_FOUND: errno.EINVAL, + winerror.ERROR_EXE_MARKED_INVALID: errno.ENOEXEC, + winerror.ERROR_FILENAME_EXCED_RANGE: errno.ENAMETOOLONG, + winerror.ERROR_FILE_EXISTS: errno.EEXIST, + winerror.ERROR_FILE_INVALID: errno.ENODEV, + winerror.ERROR_FILE_NOT_FOUND: errno.ENOENT, + 
winerror.ERROR_GEN_FAILURE: errno.EIO, + winerror.ERROR_HANDLE_DISK_FULL: errno.ENOSPC, + winerror.ERROR_INSUFFICIENT_BUFFER: errno.ENOMEM, + winerror.ERROR_INVALID_ACCESS: errno.EACCES, + winerror.ERROR_INVALID_ADDRESS: errno.EFAULT, + winerror.ERROR_INVALID_BLOCK: errno.EFAULT, + winerror.ERROR_INVALID_DATA: errno.EINVAL, + winerror.ERROR_INVALID_DRIVE: errno.ENODEV, + winerror.ERROR_INVALID_EXE_SIGNATURE: errno.ENOEXEC, + winerror.ERROR_INVALID_FLAGS: errno.EINVAL, + winerror.ERROR_INVALID_FUNCTION: errno.ENOSYS, + winerror.ERROR_INVALID_HANDLE: errno.EBADF, + winerror.ERROR_INVALID_LOGON_HOURS: errno.EACCES, + winerror.ERROR_INVALID_NAME: errno.EINVAL, + winerror.ERROR_INVALID_OWNER: errno.EINVAL, + winerror.ERROR_INVALID_PARAMETER: errno.EINVAL, + winerror.ERROR_INVALID_PASSWORD: errno.EPERM, + winerror.ERROR_INVALID_PRIMARY_GROUP: errno.EINVAL, + winerror.ERROR_INVALID_SIGNAL_NUMBER: errno.EINVAL, + winerror.ERROR_INVALID_TARGET_HANDLE: errno.EIO, + winerror.ERROR_INVALID_WORKSTATION: errno.EACCES, + winerror.ERROR_IO_DEVICE: errno.EIO, + winerror.ERROR_IO_INCOMPLETE: errno.EINTR, + winerror.ERROR_LOCKED: errno.EBUSY, + winerror.ERROR_LOCK_VIOLATION: errno.EACCES, + winerror.ERROR_LOGON_FAILURE: errno.EACCES, + winerror.ERROR_MAPPED_ALIGNMENT: errno.EINVAL, + winerror.ERROR_META_EXPANSION_TOO_LONG: errno.E2BIG, + winerror.ERROR_MORE_DATA: errno.EPIPE, + winerror.ERROR_NEGATIVE_SEEK: errno.ESPIPE, + winerror.ERROR_NOACCESS: errno.EFAULT, + winerror.ERROR_NONE_MAPPED: errno.EINVAL, + winerror.ERROR_NOT_ENOUGH_MEMORY: errno.ENOMEM, + winerror.ERROR_NOT_READY: errno.EAGAIN, + winerror.ERROR_NOT_SAME_DEVICE: errno.EXDEV, + winerror.ERROR_NO_DATA: errno.EPIPE, + winerror.ERROR_NO_MORE_SEARCH_HANDLES: errno.EIO, + winerror.ERROR_NO_PROC_SLOTS: errno.EAGAIN, + winerror.ERROR_NO_SUCH_PRIVILEGE: errno.EACCES, + winerror.ERROR_OPEN_FAILED: errno.EIO, + winerror.ERROR_OPEN_FILES: errno.EBUSY, + winerror.ERROR_OPERATION_ABORTED: errno.EINTR, + winerror.ERROR_OUTOFMEMORY: errno.ENOMEM, + winerror.ERROR_PASSWORD_EXPIRED: errno.EACCES, + winerror.ERROR_PATH_BUSY: errno.EBUSY, + winerror.ERROR_PATH_NOT_FOUND: errno.ENOTDIR, + winerror.ERROR_PIPE_BUSY: errno.EBUSY, + winerror.ERROR_PIPE_CONNECTED: errno.EPIPE, + winerror.ERROR_PIPE_LISTENING: errno.EPIPE, + winerror.ERROR_PIPE_NOT_CONNECTED: errno.EPIPE, + winerror.ERROR_PRIVILEGE_NOT_HELD: errno.EACCES, + winerror.ERROR_READ_FAULT: errno.EIO, + winerror.ERROR_SEEK: errno.EIO, + winerror.ERROR_SEEK_ON_DEVICE: errno.ESPIPE, + winerror.ERROR_SHARING_BUFFER_EXCEEDED: errno.ENFILE, + winerror.ERROR_SHARING_VIOLATION: errno.EACCES, + winerror.ERROR_STACK_OVERFLOW: errno.ENOMEM, + winerror.ERROR_SWAPERROR: errno.ENOENT, + winerror.ERROR_TOO_MANY_MODULES: errno.EMFILE, + winerror.ERROR_TOO_MANY_OPEN_FILES: errno.EMFILE, + winerror.ERROR_UNRECOGNIZED_MEDIA: errno.ENXIO, + winerror.ERROR_UNRECOGNIZED_VOLUME: errno.ENODEV, + winerror.ERROR_WAIT_NO_CHILDREN: errno.ECHILD, + winerror.ERROR_WRITE_FAULT: errno.EIO, + winerror.ERROR_WRITE_PROTECT: errno.EROFS, + } + + def __init__(self, err): + self.win_errno, self.win_function, self.win_strerror = err + OSError.__init__(self, self.winerror_map.get(self.win_errno, 0), + self.win_strerror) + +def os_link(src, dst): + # NB will only succeed on NTFS + try: + win32file.CreateHardLink(dst, src) + except pywintypes.error, details: + raise WinError(details) + +def nlinks(pathname): + """Return number of hardlinks for the given file.""" + try: + fh = win32file.CreateFile(pathname, + win32file.GENERIC_READ, 
win32file.FILE_SHARE_READ, + None, win32file.OPEN_EXISTING, 0, None) + res = win32file.GetFileInformationByHandle(fh) + fh.Close() + return res[7] + except pywintypes.error: + return os.stat(pathname).st_nlink + +def testpid(pid): + '''return True if pid is still running or unable to + determine, False otherwise''' + try: + handle = win32api.OpenProcess( + win32con.PROCESS_QUERY_INFORMATION, False, pid) + if handle: + status = win32process.GetExitCodeProcess(handle) + return status == win32con.STILL_ACTIVE + except pywintypes.error, details: + return details[0] != winerror.ERROR_INVALID_PARAMETER + return True + +def system_rcpath(): + '''return default os-specific hgrc search path''' + proc = win32api.GetCurrentProcess() + filename = win32process.GetModuleFileNameEx(proc, 0) + return [os.path.join(os.path.dirname(filename), 'mercurial.ini')] diff --git a/tests/coverage.py b/tests/coverage.py new file mode 100755 --- /dev/null +++ b/tests/coverage.py @@ -0,0 +1,890 @@ +#!/usr/bin/python +# +# Perforce Defect Tracking Integration Project +# +# +# COVERAGE.PY -- COVERAGE TESTING +# +# Gareth Rees, Ravenbrook Limited, 2001-12-04 +# Ned Batchelder, 2004-12-12 +# http://nedbatchelder.com/code/modules/coverage.html +# +# +# 1. INTRODUCTION +# +# This module provides coverage testing for Python code. +# +# The intended readership is all Python developers. +# +# This document is not confidential. +# +# See [GDR 2001-12-04a] for the command-line interface, programmatic +# interface and limitations. See [GDR 2001-12-04b] for requirements and +# design. + +"""Usage: + +coverage.py -x MODULE.py [ARG1 ARG2 ...] + Execute module, passing the given command-line arguments, collecting + coverage data. + +coverage.py -e + Erase collected coverage data. + +coverage.py -r [-m] [-o dir1,dir2,...] FILE1 FILE2 ... + Report on the statement coverage for the given files. With the -m + option, show line numbers of the statements that weren't executed. + +coverage.py -a [-d dir] [-o dir1,dir2,...] FILE1 FILE2 ... + Make annotated copies of the given files, marking statements that + are executed with > and statements that are missed with !. With + the -d option, make the copies in that directory. Without the -d + option, make each copy in the same directory as the original. + +-o dir,dir2,... + Omit reporting or annotating files when their filename path starts with + a directory listed in the omit list. + e.g. python coverage.py -i -r -o c:\python23,lib\enthought\traits + +Coverage data is saved in the file .coverage by default. Set the +COVERAGE_FILE environment variable to save it somewhere else.""" + +__version__ = "2.5.20051204" # see detailed history at the end of this file. + +import compiler +import compiler.visitor +import os +import re +import string +import sys +import threading +import types + +# 2. IMPLEMENTATION +# +# This uses the "singleton" pattern. +# +# The word "morf" means a module object (from which the source file can +# be deduced by suitable manipulation of the __file__ attribute) or a +# filename. +# +# When we generate a coverage report we have to canonicalize every +# filename in the coverage dictionary just in case it refers to the +# module we are reporting on. It seems a shame to throw away this +# information so the data in the coverage dictionary is transferred to +# the 'cexecuted' dictionary under the canonical filenames. +# +# The coverage dictionary is called "c" and the trace function "t". 
The +# reason for these short names is that Python looks up variables by name +# at runtime and so execution time depends on the length of variables! +# In the bottleneck of this application it's appropriate to abbreviate +# names to increase speed. + +class StatementFindingAstVisitor(compiler.visitor.ASTVisitor): + def __init__(self, statements, excluded, suite_spots): + compiler.visitor.ASTVisitor.__init__(self) + self.statements = statements + self.excluded = excluded + self.suite_spots = suite_spots + self.excluding_suite = 0 + + def doRecursive(self, node): + self.recordNodeLine(node) + for n in node.getChildNodes(): + self.dispatch(n) + + visitStmt = visitModule = doRecursive + + def doCode(self, node): + if hasattr(node, 'decorators') and node.decorators: + self.dispatch(node.decorators) + self.doSuite(node, node.code) + + visitFunction = visitClass = doCode + + def getFirstLine(self, node): + # Find the first line in the tree node. + lineno = node.lineno + for n in node.getChildNodes(): + f = self.getFirstLine(n) + if lineno and f: + lineno = min(lineno, f) + else: + lineno = lineno or f + return lineno + + def getLastLine(self, node): + # Find the first line in the tree node. + lineno = node.lineno + for n in node.getChildNodes(): + lineno = max(lineno, self.getLastLine(n)) + return lineno + + def doStatement(self, node): + self.recordLine(self.getFirstLine(node)) + + visitAssert = visitAssign = visitAssTuple = visitDiscard = visitPrint = \ + visitPrintnl = visitRaise = visitSubscript = visitDecorators = \ + doStatement + + def recordNodeLine(self, node): + return self.recordLine(node.lineno) + + def recordLine(self, lineno): + # Returns a bool, whether the line is included or excluded. + if lineno: + # Multi-line tests introducing suites have to get charged to their + # keyword. + if lineno in self.suite_spots: + lineno = self.suite_spots[lineno][0] + # If we're inside an exluded suite, record that this line was + # excluded. + if self.excluding_suite: + self.excluded[lineno] = 1 + return 0 + # If this line is excluded, or suite_spots maps this line to + # another line that is exlcuded, then we're excluded. + elif self.excluded.has_key(lineno) or \ + self.suite_spots.has_key(lineno) and \ + self.excluded.has_key(self.suite_spots[lineno][1]): + return 0 + # Otherwise, this is an executable line. + else: + self.statements[lineno] = 1 + return 1 + return 0 + + default = recordNodeLine + + def recordAndDispatch(self, node): + self.recordNodeLine(node) + self.dispatch(node) + + def doSuite(self, intro, body, exclude=0): + exsuite = self.excluding_suite + if exclude or (intro and not self.recordNodeLine(intro)): + self.excluding_suite = 1 + self.recordAndDispatch(body) + self.excluding_suite = exsuite + + def doPlainWordSuite(self, prevsuite, suite): + # Finding the exclude lines for else's is tricky, because they aren't + # present in the compiler parse tree. Look at the previous suite, + # and find its last line. If any line between there and the else's + # first line are excluded, then we exclude the else. 
+ lastprev = self.getLastLine(prevsuite) + firstelse = self.getFirstLine(suite) + for l in range(lastprev+1, firstelse): + if self.suite_spots.has_key(l): + self.doSuite(None, suite, exclude=self.excluded.has_key(l)) + break + else: + self.doSuite(None, suite) + + def doElse(self, prevsuite, node): + if node.else_: + self.doPlainWordSuite(prevsuite, node.else_) + + def visitFor(self, node): + self.doSuite(node, node.body) + self.doElse(node.body, node) + + def visitIf(self, node): + # The first test has to be handled separately from the rest. + # The first test is credited to the line with the "if", but the others + # are credited to the line with the test for the elif. + self.doSuite(node, node.tests[0][1]) + for t, n in node.tests[1:]: + self.doSuite(t, n) + self.doElse(node.tests[-1][1], node) + + def visitWhile(self, node): + self.doSuite(node, node.body) + self.doElse(node.body, node) + + def visitTryExcept(self, node): + self.doSuite(node, node.body) + for i in range(len(node.handlers)): + a, b, h = node.handlers[i] + if not a: + # It's a plain "except:". Find the previous suite. + if i > 0: + prev = node.handlers[i-1][2] + else: + prev = node.body + self.doPlainWordSuite(prev, h) + else: + self.doSuite(a, h) + self.doElse(node.handlers[-1][2], node) + + def visitTryFinally(self, node): + self.doSuite(node, node.body) + self.doPlainWordSuite(node.body, node.final) + + def visitGlobal(self, node): + # "global" statements don't execute like others (they don't call the + # trace function), so don't record their line numbers. + pass + +the_coverage = None + +class coverage: + error = "coverage error" + + # Name of the cache file (unless environment variable is set). + cache_default = ".coverage" + + # Environment variable naming the cache file. + cache_env = "COVERAGE_FILE" + + # A dictionary with an entry for (Python source file name, line number + # in that file) if that line has been executed. + c = {} + + # A map from canonical Python source file name to a dictionary in + # which there's an entry for each line number that has been + # executed. + cexecuted = {} + + # Cache of results of calling the analysis2() method, so that you can + # specify both -r and -a without doing double work. + analysis_cache = {} + + # Cache of results of calling the canonical_filename() method, to + # avoid duplicating work. + canonical_filename_cache = {} + + def __init__(self): + global the_coverage + if the_coverage: + raise self.error, "Only one coverage object allowed." + self.usecache = 1 + self.cache = None + self.exclude_re = '' + self.nesting = 0 + self.cstack = [] + self.xstack = [] + self.relative_dir = os.path.normcase(os.path.abspath(os.curdir)+os.path.sep) + + # t(f, x, y). This method is passed to sys.settrace as a trace function. + # See [van Rossum 2001-07-20b, 9.2] for an explanation of sys.settrace and + # the arguments and return value of the trace function. + # See [van Rossum 2001-07-20a, 3.2] for a description of frame and code + # objects. 
+ + def t(self, f, w, a): #pragma: no cover + #print w, f.f_code.co_filename, f.f_lineno + if w == 'line': + self.c[(f.f_code.co_filename, f.f_lineno)] = 1 + for c in self.cstack: + c[(f.f_code.co_filename, f.f_lineno)] = 1 + return self.t + + def help(self, error=None): + if error: + print error + print + print __doc__ + sys.exit(1) + + def command_line(self): + import getopt + settings = {} + optmap = { + '-a': 'annotate', + '-d:': 'directory=', + '-e': 'erase', + '-h': 'help', + '-i': 'ignore-errors', + '-m': 'show-missing', + '-r': 'report', + '-x': 'execute', + '-o': 'omit=', + } + short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '') + long_opts = optmap.values() + options, args = getopt.getopt(sys.argv[1:], short_opts, long_opts) + for o, a in options: + if optmap.has_key(o): + settings[optmap[o]] = 1 + elif optmap.has_key(o + ':'): + settings[optmap[o + ':']] = a + elif o[2:] in long_opts: + settings[o[2:]] = 1 + elif o[2:] + '=' in long_opts: + settings[o[2:]] = a + else: + self.help("Unknown option: '%s'." % o) + if settings.get('help'): + self.help() + for i in ['erase', 'execute']: + for j in ['annotate', 'report']: + if settings.get(i) and settings.get(j): + self.help("You can't specify the '%s' and '%s' " + "options at the same time." % (i, j)) + args_needed = (settings.get('execute') + or settings.get('annotate') + or settings.get('report')) + action = settings.get('erase') or args_needed + if not action: + self.help("You must specify at least one of -e, -x, -r, or -a.") + if not args_needed and args: + self.help("Unexpected arguments %s." % args) + + self.get_ready() + self.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]') + + if settings.get('erase'): + self.erase() + if settings.get('execute'): + if not args: + self.help("Nothing to do.") + sys.argv = args + self.start() + import __main__ + sys.path[0] = os.path.dirname(sys.argv[0]) + execfile(sys.argv[0], __main__.__dict__) + if not args: + args = self.cexecuted.keys() + ignore_errors = settings.get('ignore-errors') + show_missing = settings.get('show-missing') + directory = settings.get('directory') + omit = filter(None, settings.get('omit', '').split(',')) + + if settings.get('report'): + self.report(args, show_missing, ignore_errors, omit_prefixes=omit) + if settings.get('annotate'): + self.annotate(args, directory, ignore_errors, omit_prefixes=omit) + + def use_cache(self, usecache): + self.usecache = usecache + + def get_ready(self): + if self.usecache and not self.cache: + self.cache = os.path.abspath(os.environ.get(self.cache_env, + self.cache_default)) + self.restore() + self.analysis_cache = {} + + def start(self): + self.get_ready() + if self.nesting == 0: #pragma: no cover + sys.settrace(self.t) + if hasattr(threading, 'settrace'): + threading.settrace(self.t) + self.nesting += 1 + + def stop(self): + self.nesting -= 1 + if self.nesting == 0: #pragma: no cover + sys.settrace(None) + if hasattr(threading, 'settrace'): + threading.settrace(None) + + def erase(self): + self.c = {} + self.analysis_cache = {} + self.cexecuted = {} + if self.cache and os.path.exists(self.cache): + os.remove(self.cache) + self.exclude_re = "" + + def exclude(self, re): + if self.exclude_re: + self.exclude_re += "|" + self.exclude_re += "(" + re + ")" + + def begin_recursive(self): + self.cstack.append(self.c) + self.xstack.append(self.exclude_re) + + def end_recursive(self): + self.c = self.cstack.pop() + self.exclude_re = self.xstack.pop() + + # save(). Save coverage data to the coverage cache. 
+ + def save(self): + # move to directory that must exist. + os.chdir(os.sep) + if self.usecache and self.cache: + self.canonicalize_filenames() + cache = open(self.cache, 'wb') + import marshal + marshal.dump(self.cexecuted, cache) + cache.close() + + # restore(). Restore coverage data from the coverage cache (if it exists). + + def restore(self): + self.c = {} + self.cexecuted = {} + assert self.usecache + if not os.path.exists(self.cache): + return + try: + cache = open(self.cache, 'rb') + import marshal + cexecuted = marshal.load(cache) + cache.close() + if isinstance(cexecuted, types.DictType): + self.cexecuted = cexecuted + except: + pass + + # canonical_filename(filename). Return a canonical filename for the + # file (that is, an absolute path with no redundant components and + # normalized case). See [GDR 2001-12-04b, 3.3]. + + def canonical_filename(self, filename): + if not self.canonical_filename_cache.has_key(filename): + f = filename + if os.path.isabs(f) and not os.path.exists(f): + f = os.path.basename(f) + if not os.path.isabs(f): + for path in [os.curdir] + sys.path: + g = os.path.join(path, f) + if os.path.exists(g): + f = g + break + cf = os.path.normcase(os.path.abspath(f)) + self.canonical_filename_cache[filename] = cf + return self.canonical_filename_cache[filename] + + # canonicalize_filenames(). Copy results from "c" to "cexecuted", + # canonicalizing filenames on the way. Clear the "c" map. + + def canonicalize_filenames(self): + for filename, lineno in self.c.keys(): + f = self.canonical_filename(filename) + if not self.cexecuted.has_key(f): + self.cexecuted[f] = {} + self.cexecuted[f][lineno] = 1 + self.c = {} + + # morf_filename(morf). Return the filename for a module or file. + + def morf_filename(self, morf): + if isinstance(morf, types.ModuleType): + if not hasattr(morf, '__file__'): + raise self.error, "Module has no __file__ attribute." + file = morf.__file__ + else: + file = morf + return self.canonical_filename(file) + + # analyze_morf(morf). Analyze the module or filename passed as + # the argument. If the source code can't be found, raise an error. + # Otherwise, return a tuple of (1) the canonical filename of the + # source code for the module, (2) a list of lines of statements + # in the source code, and (3) a list of lines of excluded statements. + + def analyze_morf(self, morf): + if self.analysis_cache.has_key(morf): + return self.analysis_cache[morf] + filename = self.morf_filename(morf) + ext = os.path.splitext(filename)[1] + if ext == '.pyc': + if not os.path.exists(filename[0:-1]): + raise self.error, ("No source for compiled code '%s'." + % filename) + filename = filename[0:-1] + elif ext != '.py': + raise self.error, "File '%s' not Python source." % filename + source = open(filename, 'r') + lines, excluded_lines = self.find_executable_statements( + source.read(), exclude=self.exclude_re + ) + source.close() + result = filename, lines, excluded_lines + self.analysis_cache[morf] = result + return result + + def get_suite_spots(self, tree, spots): + import symbol, token + for i in range(1, len(tree)): + if type(tree[i]) == type(()): + if tree[i][0] == symbol.suite: + # Found a suite, look back for the colon and keyword. + lineno_colon = lineno_word = None + for j in range(i-1, 0, -1): + if tree[j][0] == token.COLON: + lineno_colon = tree[j][2] + elif tree[j][0] == token.NAME: + if tree[j][1] == 'elif': + # Find the line number of the first non-terminal + # after the keyword. 
+ t = tree[j+1] + while t and token.ISNONTERMINAL(t[0]): + t = t[1] + if t: + lineno_word = t[2] + else: + lineno_word = tree[j][2] + break + elif tree[j][0] == symbol.except_clause: + # "except" clauses look like: + # ('except_clause', ('NAME', 'except', lineno), ...) + if tree[j][1][0] == token.NAME: + lineno_word = tree[j][1][2] + break + if lineno_colon and lineno_word: + # Found colon and keyword, mark all the lines + # between the two with the two line numbers. + for l in range(lineno_word, lineno_colon+1): + spots[l] = (lineno_word, lineno_colon) + self.get_suite_spots(tree[i], spots) + + def find_executable_statements(self, text, exclude=None): + # Find lines which match an exclusion pattern. + excluded = {} + suite_spots = {} + if exclude: + reExclude = re.compile(exclude) + lines = text.split('\n') + for i in range(len(lines)): + if reExclude.search(lines[i]): + excluded[i+1] = 1 + + import parser + tree = parser.suite(text+'\n\n').totuple(1) + self.get_suite_spots(tree, suite_spots) + + # Use the compiler module to parse the text and find the executable + # statements. We add newlines to be impervious to final partial lines. + statements = {} + ast = compiler.parse(text+'\n\n') + visitor = StatementFindingAstVisitor(statements, excluded, suite_spots) + compiler.walk(ast, visitor, walker=visitor) + + lines = statements.keys() + lines.sort() + excluded_lines = excluded.keys() + excluded_lines.sort() + return lines, excluded_lines + + # format_lines(statements, lines). Format a list of line numbers + # for printing by coalescing groups of lines as long as the lines + # represent consecutive statements. This will coalesce even if + # there are gaps between statements, so if statements = + # [1,2,3,4,5,10,11,12,13,14] and lines = [1,2,5,10,11,13,14] then + # format_lines will return "1-2, 5-11, 13-14". + + def format_lines(self, statements, lines): + pairs = [] + i = 0 + j = 0 + start = None + pairs = [] + while i < len(statements) and j < len(lines): + if statements[i] == lines[j]: + if start == None: + start = lines[j] + end = lines[j] + j = j + 1 + elif start: + pairs.append((start, end)) + start = None + i = i + 1 + if start: + pairs.append((start, end)) + def stringify(pair): + start, end = pair + if start == end: + return "%d" % start + else: + return "%d-%d" % (start, end) + return string.join(map(stringify, pairs), ", ") + + # Backward compatibility with version 1. + def analysis(self, morf): + f, s, _, m, mf = self.analysis2(morf) + return f, s, m, mf + + def analysis2(self, morf): + filename, statements, excluded = self.analyze_morf(morf) + self.canonicalize_filenames() + if not self.cexecuted.has_key(filename): + self.cexecuted[filename] = {} + missing = [] + for line in statements: + if not self.cexecuted[filename].has_key(line): + missing.append(line) + return (filename, statements, excluded, missing, + self.format_lines(statements, missing)) + + def relative_filename(self, filename): + """ Convert filename to relative filename from self.relative_dir. + """ + return filename.replace(self.relative_dir, "") + + def morf_name(self, morf): + """ Return the name of morf as used in report. + """ + if isinstance(morf, types.ModuleType): + return morf.__name__ + else: + return self.relative_filename(os.path.splitext(morf)[0]) + + def filter_by_prefix(self, morfs, omit_prefixes): + """ Return list of morfs where the morf name does not begin + with any one of the omit_prefixes. 
+ """ + filtered_morfs = [] + for morf in morfs: + for prefix in omit_prefixes: + if self.morf_name(morf).startswith(prefix): + break + else: + filtered_morfs.append(morf) + + return filtered_morfs + + def morf_name_compare(self, x, y): + return cmp(self.morf_name(x), self.morf_name(y)) + + def report(self, morfs, show_missing=1, ignore_errors=0, file=None, omit_prefixes=[]): + if not isinstance(morfs, types.ListType): + morfs = [morfs] + morfs = self.filter_by_prefix(morfs, omit_prefixes) + morfs.sort(self.morf_name_compare) + + max_name = max([5,] + map(len, map(self.morf_name, morfs))) + fmt_name = "%%- %ds " % max_name + fmt_err = fmt_name + "%s: %s" + header = fmt_name % "Name" + " Stmts Exec Cover" + fmt_coverage = fmt_name + "% 6d % 6d % 5d%%" + if show_missing: + header = header + " Missing" + fmt_coverage = fmt_coverage + " %s" + if not file: + file = sys.stdout + print >>file, header + print >>file, "-" * len(header) + total_statements = 0 + total_executed = 0 + for morf in morfs: + name = self.morf_name(morf) + try: + _, statements, _, missing, readable = self.analysis2(morf) + n = len(statements) + m = n - len(missing) + if n > 0: + pc = 100.0 * m / n + else: + pc = 100.0 + args = (name, n, m, pc) + if show_missing: + args = args + (readable,) + print >>file, fmt_coverage % args + total_statements = total_statements + n + total_executed = total_executed + m + except KeyboardInterrupt: #pragma: no cover + raise + except: + if not ignore_errors: + type, msg = sys.exc_info()[0:2] + print >>file, fmt_err % (name, type, msg) + if len(morfs) > 1: + print >>file, "-" * len(header) + if total_statements > 0: + pc = 100.0 * total_executed / total_statements + else: + pc = 100.0 + args = ("TOTAL", total_statements, total_executed, pc) + if show_missing: + args = args + ("",) + print >>file, fmt_coverage % args + + # annotate(morfs, ignore_errors). + + blank_re = re.compile(r"\s*(#|$)") + else_re = re.compile(r"\s*else\s*:\s*(#|$)") + + def annotate(self, morfs, directory=None, ignore_errors=0, omit_prefixes=[]): + morfs = self.filter_by_prefix(morfs, omit_prefixes) + for morf in morfs: + try: + filename, statements, excluded, missing, _ = self.analysis2(morf) + self.annotate_file(filename, statements, excluded, missing, directory) + except KeyboardInterrupt: + raise + except: + if not ignore_errors: + raise + + def annotate_file(self, filename, statements, excluded, missing, directory=None): + source = open(filename, 'r') + if directory: + dest_file = os.path.join(directory, + os.path.basename(filename) + + ',cover') + else: + dest_file = filename + ',cover' + dest = open(dest_file, 'w') + lineno = 0 + i = 0 + j = 0 + covered = 1 + while 1: + line = source.readline() + if line == '': + break + lineno = lineno + 1 + while i < len(statements) and statements[i] < lineno: + i = i + 1 + while j < len(missing) and missing[j] < lineno: + j = j + 1 + if i < len(statements) and statements[i] == lineno: + covered = j >= len(missing) or missing[j] > lineno + if self.blank_re.match(line): + dest.write(' ') + elif self.else_re.match(line): + # Special logic for lines containing only 'else:'. + # See [GDR 2001-12-04b, 3.2]. + if i >= len(statements) and j >= len(missing): + dest.write('! ') + elif i >= len(statements) or j >= len(missing): + dest.write('> ') + elif statements[i] == missing[j]: + dest.write('! ') + else: + dest.write('> ') + elif lineno in excluded: + dest.write('- ') + elif covered: + dest.write('> ') + else: + dest.write('! 
') + dest.write(line) + source.close() + dest.close() + +# Singleton object. +the_coverage = coverage() + +# Module functions call methods in the singleton object. +def use_cache(*args, **kw): return the_coverage.use_cache(*args, **kw) +def start(*args, **kw): return the_coverage.start(*args, **kw) +def stop(*args, **kw): return the_coverage.stop(*args, **kw) +def erase(*args, **kw): return the_coverage.erase(*args, **kw) +def begin_recursive(*args, **kw): return the_coverage.begin_recursive(*args, **kw) +def end_recursive(*args, **kw): return the_coverage.end_recursive(*args, **kw) +def exclude(*args, **kw): return the_coverage.exclude(*args, **kw) +def analysis(*args, **kw): return the_coverage.analysis(*args, **kw) +def analysis2(*args, **kw): return the_coverage.analysis2(*args, **kw) +def report(*args, **kw): return the_coverage.report(*args, **kw) +def annotate(*args, **kw): return the_coverage.annotate(*args, **kw) +def annotate_file(*args, **kw): return the_coverage.annotate_file(*args, **kw) + +# Save coverage data when Python exits. (The atexit module wasn't +# introduced until Python 2.0, so use sys.exitfunc when it's not +# available.) +try: + import atexit + atexit.register(the_coverage.save) +except ImportError: + sys.exitfunc = the_coverage.save + +# Command-line interface. +if __name__ == '__main__': + the_coverage.command_line() + + +# A. REFERENCES +# +# [GDR 2001-12-04a] "Statement coverage for Python"; Gareth Rees; +# Ravenbrook Limited; 2001-12-04; +# . +# +# [GDR 2001-12-04b] "Statement coverage for Python: design and +# analysis"; Gareth Rees; Ravenbrook Limited; 2001-12-04; +# . +# +# [van Rossum 2001-07-20a] "Python Reference Manual (releae 2.1.1)"; +# Guide van Rossum; 2001-07-20; +# . +# +# [van Rossum 2001-07-20b] "Python Library Reference"; Guido van Rossum; +# 2001-07-20; . +# +# +# B. DOCUMENT HISTORY +# +# 2001-12-04 GDR Created. +# +# 2001-12-06 GDR Added command-line interface and source code +# annotation. +# +# 2001-12-09 GDR Moved design and interface to separate documents. +# +# 2001-12-10 GDR Open cache file as binary on Windows. Allow +# simultaneous -e and -x, or -a and -r. +# +# 2001-12-12 GDR Added command-line help. Cache analysis so that it +# only needs to be done once when you specify -a and -r. +# +# 2001-12-13 GDR Improved speed while recording. Portable between +# Python 1.5.2 and 2.1.1. +# +# 2002-01-03 GDR Module-level functions work correctly. +# +# 2002-01-07 GDR Update sys.path when running a file with the -x option, +# so that it matches the value the program would get if it were run on +# its own. +# +# 2004-12-12 NMB Significant code changes. +# - Finding executable statements has been rewritten so that docstrings and +# other quirks of Python execution aren't mistakenly identified as missing +# lines. +# - Lines can be excluded from consideration, even entire suites of lines. +# - The filesystem cache of covered lines can be disabled programmatically. +# - Modernized the code. +# +# 2004-12-14 NMB Minor tweaks. Return 'analysis' to its original behavior +# and add 'analysis2'. Add a global for 'annotate', and factor it, adding +# 'annotate_file'. +# +# 2004-12-31 NMB Allow for keyword arguments in the module global functions. +# Thanks, Allen. +# +# 2005-12-02 NMB Call threading.settrace so that all threads are measured. +# Thanks Martin Fuzzey. Add a file argument to report so that reports can be +# captured to a different destination. +# +# 2005-12-03 NMB coverage.py can now measure itself. 
+# +# 2005-12-04 NMB Adapted Greg Rogers' patch for using relative filenames, +# and sorting and omitting files to report on. +# +# C. COPYRIGHT AND LICENCE +# +# Copyright 2001 Gareth Rees. All rights reserved. +# Copyright 2004-2005 Ned Batchelder. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. +# +# $Id: coverage.py 26 2005-12-04 18:42:44Z ned $ diff --git a/tests/run-tests b/tests/run-tests --- a/tests/run-tests +++ b/tests/run-tests @@ -1,4 +1,9 @@ #!/bin/sh -e +# +# environment variables: +# +# TEST_COVERAGE - set non-empty if you want to print test coverage report +# COVERAGE_STDLIB - set non-empty to report coverage of standard library LANG="C"; export LANG LC_CTYPE="C"; export LC_CTYPE @@ -19,6 +24,16 @@ HGEDITOR=true; export HGEDITOR HGMERGE=true; export HGMERGE HGUSER="test"; export HGUSER HGRCPATH=""; export HGRCPATH +OS=`uname` + +case "$OS" in + HP-UX|SunOS) + DIFFOPTS= + ;; + *) + DIFFOPTS=-u + ;; +esac if [ `echo -n HG` = "-n HG" ] then @@ -64,7 +79,19 @@ else fi cd "$TESTDIR" -BINDIR="$INST/bin" +BINDIR="$INST/bin"; export BINDIR +if [ -n "$TEST_COVERAGE" ]; then + COVERAGE_FILE="$TESTDIR/.coverage"; export COVERAGE_FILE + rm -f "$COVERAGE_FILE" + mv "$BINDIR/hg" "$BINDIR/hg.py" + { + echo '#!/bin/sh' + echo "exec \"${PYTHON-python}\" \"$TESTDIR/coverage.py\"" \ + "-x \"$BINDIR/hg.py\" \"\$@\"" + } > "$BINDIR/hg" + chmod 700 "$BINDIR/hg" +fi + PATH="$BINDIR:$PATH"; export PATH if [ -n "$PYTHON" ]; then { @@ -101,13 +128,13 @@ run_one() { cat "$ERR" fail=1 elif [ -r "$OUTOK" ]; then - if diff -u "$OUTOK" "$OUT" > /dev/null; then + if diff $DIFFOPTS "$OUTOK" "$OUT" > /dev/null; then : no differences else cp "$OUT" "$ERR" echo echo "$1 output changed:" - diff -u "$OUTOK" "$ERR" || true + diff $DIFFOPTS "$OUTOK" "$ERR" || true fail=1 fi fi @@ -153,6 +180,17 @@ done echo echo "Ran $tests tests, $failed failed." 
+if [ -n "$TEST_COVERAGE" ]; then
+    unset PYTHONPATH
+    $ECHO_N "$BINDIR,$TESTDIR,$HGTMP/test-," > "$HGTMP/omit"
+    if [ -z "$COVERAGE_STDLIB" ]; then
+        "${PYTHON-python}" -c 'import sys; print ",".join(sys.path)' \
+            >> "$HGTMP/omit"
+    fi
+    cd "$PYTHONDIR"
+    "${PYTHON-python}" "$TESTDIR/coverage.py" -r --omit="`cat \"$HGTMP/omit\"`"
+fi
+
 if [ $failed -gt 0 ] ; then
     exit 1
 fi
diff --git a/tests/test-clone-failure.out b/tests/test-clone-failure.out
--- a/tests/test-clone-failure.out
+++ b/tests/test-clone-failure.out
@@ -6,7 +6,7 @@ 255
 abort: repository a not found!
 255
 abort: destination '../a' already exists
-1
+255
 abort: repository a not found!
 255
 abort: destination 'q' already exists
diff --git a/tests/test-nested-repo b/tests/test-nested-repo
new file mode 100755
--- /dev/null
+++ b/tests/test-nested-repo
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+hg init a
+cd a
+hg init b
+echo x > b/x
+echo '# should print nothing'
+hg st
+echo '# should print ? b/x'
+hg st b/x
+
+hg add b/x
+
+echo '# should print A b/x'
+hg st
+echo '# should forget b/x'
+hg forget
+echo '# should print nothing'
+hg st b
diff --git a/tests/test-nested-repo.out b/tests/test-nested-repo.out
new file mode 100644
--- /dev/null
+++ b/tests/test-nested-repo.out
@@ -0,0 +1,8 @@
+# should print nothing
+# should print ? b/x
+? b/x
+# should print A b/x
+A b/x
+# should forget b/x
+forgetting b/x
+# should print nothing
diff --git a/tests/test-up-local-change b/tests/test-up-local-change
--- a/tests/test-up-local-change
+++ b/tests/test-up-local-change
@@ -55,3 +55,15 @@ hg --debug up -f -m
 hg parents
 hg diff | sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
              -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/"
+
+# test a local add
+cd ..
+hg init a
+hg init b
+echo a > a/a
+echo a > b/a
+hg --cwd a commit -A -m a
+cd b
+hg add a
+hg pull -u ../a
+hg st
diff --git a/tests/test-up-local-change.out b/tests/test-up-local-change.out
--- a/tests/test-up-local-change.out
+++ b/tests/test-up-local-change.out
@@ -136,3 +136,10 @@ diff -r 802f095af299 a
 @@ -1,1 +1,1 @@
 a2
-a2
+abc
+adding a
+pulling from ../a
+requesting all changes
+adding changesets
+adding manifests
+adding file changes
+added 1 changesets with 1 changes to 1 files
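
As a quick illustration of the mercurial/util.py change above, here is a standalone sketch of the find_in_path() helper and the gpatch-then-patch fallback it enables. The helper body is copied from the hunk above; the temporary directory, the empty "gpatch" file, and the /nonexistent search entry in the demo are hypothetical and only standard-library calls are used.

# Standalone sketch of the find_in_path() helper added to mercurial/util.py.
# The temp directory and fake "gpatch" file below exist only for the demo.
import os
import tempfile

def find_in_path(name, path, default=None):
    '''find name in search path. path can be string (will be split
    with os.pathsep), or iterable thing that returns strings. if name
    found, return path to name. else return default.'''
    if isinstance(path, str):
        path = path.split(os.pathsep)
    for p in path:
        p_name = os.path.join(p, name)
        if os.path.exists(p_name):
            return p_name
    return default

if __name__ == '__main__':
    d = tempfile.mkdtemp()
    open(os.path.join(d, 'gpatch'), 'w').close()
    # Found: returns the full path to the fake gpatch inside the temp dir.
    print(find_in_path('gpatch', d + os.pathsep + '/nonexistent', 'patch'))
    # Not found: falls back to the default, mirroring how util.patch() now
    # prefers GNU "gpatch" when present but still runs plain "patch".
    print(find_in_path('gpatch', '/nonexistent', 'patch'))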