|
1 # localrepo.py - read/write repository class for mercurial |
|
2 # |
|
3 # Copyright 2005 Matt Mackall <mpm@selenic.com> |
|
4 # |
|
5 # This software may be used and distributed according to the terms |
|
6 # of the GNU General Public License, incorporated herein by reference. |
|
7 |
|
8 import sys, struct, os, util |
|
9 from repo import * |
|
10 from revlog import * |
|
11 from filelog import * |
|
12 from manifest import * |
|
13 from changelog import * |
|
14 from demandload import * |
|
15 from dirstate import * |
|
16 demandload(globals(), "re lock transaction tempfile stat") |
|
17 |
|
18 class localrepository: |
|
    def __init__(self, ui, opener, path=None, create=0):
        """Open (or create) a repository.

        ui: user-interface object used for messages and config.
        opener: factory returning a file opener rooted at a directory.
        path: repository root.  An "http://" URL marks a remote repo;
              None triggers an upward search from the cwd for ".hg".
        create: when true, create the .hg control directory.
        """
        self.remote = 0
        if path and path.startswith("http://"):
            self.remote = 1
            self.path = path
        else:
            if not path:
                # walk up from the cwd until a .hg directory is found
                p = os.getcwd()
                while not os.path.isdir(os.path.join(p, ".hg")):
                    oldp = p
                    p = os.path.dirname(p)
                    if p == oldp: raise RepoError("no repo found")
                path = p
            self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise RepoError("repository %s not found" % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        # openers bound to the control directory and the working directory
        self.opener = opener(self.path)
        self.wopener = opener(self.root)
        self.manifest = manifest(self.opener)
        self.changelog = changelog(self.opener)
        # lazily-filled caches; see tags() and nodetags()
        self.tagscache = None
        self.nodetagscache = None

        if not self.remote:
            self.dirstate = dirstate(self.opener, ui, self.root)
            try:
                # per-repository configuration is optional
                self.ui.readconfig(self.opener("hgrc"))
            except IOError: pass
|
56 |
|
    def hook(self, name, **args):
        """Run the configured [hooks] command for `name`, if any.

        Keyword args are exported to the hook as upper-cased environment
        variables for the duration of the call.  Returns False when the
        hook exits non-zero, True otherwise (including when no hook is
        configured at all).
        """
        s = self.ui.config("hooks", name)
        if s:
            self.ui.note("running hook %s: %s\n" % (name, s))
            old = {}
            # export args, remembering prior values so they can be restored
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            r = os.system(s)

            # restore the environment exactly as it was before the hook
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn("abort: %s hook failed with status %d!\n" %
                             (name, r))
                return False
        return True
|
80 |
|
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            # NOTE: despite the 'self' parameter this is a plain nested
            # function, not a method; callers below pass self explicitly.
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    # malformed hash: record the tag as unresolvable
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            try:
                # repo-local (uncommitted) tags override committed ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always present and always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
|
118 |
|
119 def tagslist(self): |
|
120 '''return a list of tags ordered by revision''' |
|
121 l = [] |
|
122 for t, n in self.tags().items(): |
|
123 try: |
|
124 r = self.changelog.rev(n) |
|
125 except: |
|
126 r = -2 # sort to the beginning of the list if unknown |
|
127 l.append((r,t,n)) |
|
128 l.sort() |
|
129 return [(t,n) for r,t,n in l] |
|
130 |
|
131 def nodetags(self, node): |
|
132 '''return the tags associated with a node''' |
|
133 if not self.nodetagscache: |
|
134 self.nodetagscache = {} |
|
135 for t,n in self.tags().items(): |
|
136 self.nodetagscache.setdefault(n,[]).append(t) |
|
137 return self.nodetagscache.get(node, []) |
|
138 |
|
139 def lookup(self, key): |
|
140 try: |
|
141 return self.tags()[key] |
|
142 except KeyError: |
|
143 try: |
|
144 return self.changelog.lookup(key) |
|
145 except: |
|
146 raise RepoError("unknown revision '%s'" % key) |
|
147 |
|
    def dev(self):
        """Device number (st_dev) of the repo dir, or -1 for remote repos."""
        if self.remote: return -1
        return os.stat(self.path).st_dev
|
151 |
|
    def local(self):
        """True if this repository is on local disk (not http)."""
        return not self.remote
|
154 |
|
    def join(self, f):
        """Return the path of f inside the .hg control directory."""
        return os.path.join(self.path, f)
|
157 |
|
    def wjoin(self, f):
        """Return the path of f inside the working directory."""
        return os.path.join(self.root, f)
|
160 |
|
    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/': f = f[1:]
        return filelog(self.opener, f)
|
164 |
|
    def getcwd(self):
        """Current working directory, relative to the repository root."""
        return self.dirstate.getcwd()
|
167 |
|
    def wfile(self, f, mode='r'):
        """Open working-directory file f with the given mode."""
        return self.wopener(f, mode)
|
170 |
|
    def wread(self, filename):
        """Return the full contents of a working-directory file."""
        return self.wopener(filename, 'r').read()
|
173 |
|
174 def wwrite(self, filename, data, fd=None): |
|
175 if fd: |
|
176 return fd.write(data) |
|
177 return self.wopener(filename, 'w').write(data) |
|
178 |
|
    def transaction(self):
        """Start a journaled transaction.

        Also snapshots the dirstate so a later undo() can restore it.
        Returns the transaction object; on close, the journal is kept
        as the "undo" data.
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # on successful close, keep the journal around for undo
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
|
194 |
|
    def recover(self):
        """Roll back a transaction that was interrupted mid-write, if any."""
        lock = self.lock()  # held (by refcount) until this frame exits
        if os.path.exists(self.join("journal")):
            self.ui.status("rolling back interrupted transaction\n")
            return transaction.rollback(self.opener, self.join("journal"))
        else:
            self.ui.warn("no interrupted transaction available\n")
|
202 |
|
    def undo(self):
        """Roll back the last completed transaction and restore the
        dirstate snapshot taken when it began."""
        lock = self.lock()  # held (by refcount) until this frame exits
        if os.path.exists(self.join("undo")):
            self.ui.status("rolling back last transaction\n")
            transaction.rollback(self.opener, self.join("undo"))
            # drop the stale dirstate object before swapping the file
            # underneath it, then reload from the restored file
            self.dirstate = None
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate = dirstate(self.opener, self.ui, self.root)
        else:
            self.ui.warn("no undo information available\n")
|
213 |
|
    def lock(self, wait=1):
        """Acquire the repository lock.

        Tries a non-blocking acquire first; when the lock is held and
        `wait` is true, warns and retries with `wait` as the timeout,
        otherwise re-raises LockHeld.
        """
        try:
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
|
222 |
|
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit the given working files directly, bypassing the usual
        status checks -- used by import and debug commands.

        p1/p2 default to the current dirstate parents.  The dirstate is
        only updated when we are committing on top of it.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file is gone from the working dir: drop it everywhere
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
|
292 |
|
    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        """Commit working-directory changes.

        files: explicit list of files to commit (default: all changes)
        text: commit message; when empty, an editor is launched
        match: filter applied when files is not given
        force: allow a commit with no file changes

        Returns the new changeset node, or None when there was nothing
        to commit, the message was empty, or a hook vetoed the commit.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # commit only the named files, classified by dirstate status
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn("%s not tracked!\n" % f)
        else:
            # commit everything changed/added, remove everything deleted
            (c, a, d, u) = self.changes(match=match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # an in-progress merge (p2 set) may legitimately commit "nothing"
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status("nothing changed\n")
            return None

        if not self.hook("precommit"):
            return None

        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn("trouble committing %s!\n" % f)
                raise

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source and its revision in filelog metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))

            r = self.file(f)
            fp1 = m1.get(f, nullid)
            fp2 = m2.get(f, nullid)

            # is the same revision on two branches of a merge?
            if fp2 == fp1:
                fp2 = nullid

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1):
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # build a commit-message template and run the user's editor
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                # an empty message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
|
415 |
|
416 def walk(self, node=None, files=[], match=util.always): |
|
417 if node: |
|
418 for fn in self.manifest.read(self.changelog.read(node)[0]): |
|
419 if match(fn): yield 'm', fn |
|
420 else: |
|
421 for src, fn in self.dirstate.walk(files, match): |
|
422 yield src, fn |
|
423 |
|
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Compare two revisions, or a revision and the working directory.

        Returns (changed, added, deleted, unknown) lists of file names.
        With no node2 the working directory is the second side; with no
        node1 the working directory's parent is the first side.
        """
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare working copy of fn against its manifest revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest dict for node, restricted to files match() accepts
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            # "" marks working-directory files with no recorded node
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    # a "" entry needs a content compare to be sure
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever is left in mf1 was deleted relative to node1
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
|
494 |
|
495 def add(self, list): |
|
496 for f in list: |
|
497 p = self.wjoin(f) |
|
498 if not os.path.exists(p): |
|
499 self.ui.warn("%s does not exist!\n" % f) |
|
500 elif not os.path.isfile(p): |
|
501 self.ui.warn("%s not added: only files supported currently\n" % f) |
|
502 elif self.dirstate.state(f) in 'an': |
|
503 self.ui.warn("%s already tracked!\n" % f) |
|
504 else: |
|
505 self.dirstate.update([f], "a") |
|
506 |
|
507 def forget(self, list): |
|
508 for f in list: |
|
509 if self.dirstate.state(f) not in 'ai': |
|
510 self.ui.warn("%s not added!\n" % f) |
|
511 else: |
|
512 self.dirstate.forget([f]) |
|
513 |
|
514 def remove(self, list): |
|
515 for f in list: |
|
516 p = self.wjoin(f) |
|
517 if os.path.exists(p): |
|
518 self.ui.warn("%s still exists!\n" % f) |
|
519 elif self.dirstate.state(f) == 'a': |
|
520 self.ui.warn("%s never committed!\n" % f) |
|
521 self.dirstate.forget([f]) |
|
522 elif f not in self.dirstate: |
|
523 self.ui.warn("%s not tracked!\n" % f) |
|
524 else: |
|
525 self.dirstate.update([f], "r") |
|
526 |
|
527 def copy(self, source, dest): |
|
528 p = self.wjoin(dest) |
|
529 if not os.path.exists(p): |
|
530 self.ui.warn("%s does not exist!\n" % dest) |
|
531 elif not os.path.isfile(p): |
|
532 self.ui.warn("copy failed: %s is not a file\n" % dest) |
|
533 else: |
|
534 if self.dirstate.state(dest) == '?': |
|
535 self.dirstate.update([dest], "a") |
|
536 self.dirstate.copy(source, dest) |
|
537 |
|
    def heads(self):
        """Return the changelog's head nodes."""
        return self.changelog.heads()
|
540 |
|
541 # branchlookup returns a dict giving a list of branches for |
|
542 # each head. A branch is defined as the tag of a node or |
|
543 # the branch of the node's parents. If a node has multiple |
|
544 # branch tags, tags are eliminated if they are visible from other |
|
545 # branch tags. |
|
546 # |
|
547 # So, for this graph: a->b->c->d->e |
|
548 # \ / |
|
549 # aa -----/ |
|
550 # a has tag 2.6.12 |
|
551 # d has tag 2.6.13 |
|
552 # e would have branch tags for 2.6.12 and 2.6.13. Because the node |
|
553 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated |
|
554 # from the list. |
|
555 # |
|
556 # It is possible that more than one head will have the same branch tag. |
|
557 # callers need to check the result for multiple heads under the same |
|
558 # branch tag if that is a problem for them (ie checkout of a specific |
|
559 # branch). |
|
560 # |
|
561 # passing in a specific branch will limit the depth of the search |
|
562 # through the parents. It won't limit the branches returned in the |
|
563 # result though. |
|
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its list of branch tags.

        See the comment block above this method for the full algorithm;
        `branch`, when given, limits the depth of the parent traversal.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume at a queued merge's second parent, keeping the
                # set of tags found on the way to it
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    # mark this tag node visible from every node found so far
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once the requested branch is reached
                    if branch in tags:
                        continue
                seen[n] = 1
                # queue second parents of merges for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of branch-tag nodes reachable from node, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
|
646 |
|
647 def branches(self, nodes): |
|
648 if not nodes: nodes = [self.changelog.tip()] |
|
649 b = [] |
|
650 for n in nodes: |
|
651 t = n |
|
652 while n: |
|
653 p = self.changelog.parents(n) |
|
654 if p[1] != nullid or p[0] == nullid: |
|
655 b.append((t, n, p[0], p[1])) |
|
656 break |
|
657 n = p[0] |
|
658 return b |
|
659 |
|
660 def between(self, pairs): |
|
661 r = [] |
|
662 |
|
663 for top, bottom in pairs: |
|
664 n, l, i = top, [], 0 |
|
665 f = 1 |
|
666 |
|
667 while n != bottom: |
|
668 p = self.changelog.parents(n)[0] |
|
669 if i == f: |
|
670 l.append(n) |
|
671 f = f * 2 |
|
672 n = p |
|
673 i += 1 |
|
674 |
|
675 r.append(l) |
|
676 |
|
677 return r |
|
678 |
|
    def newer(self, nodes):
        """Return all nodes that are the given nodes or their
        descendants, in revision order."""
        m = {}
        nl = []
        pm = {}
        cl = self.changelog
        t = l = cl.count()

        # find the lowest numbered node
        for n in nodes:
            l = min(l, cl.rev(n))
            m[n] = 1

        # scan forward from there: a node qualifies if it was listed
        # or if one of its parents already qualified
        for i in xrange(l, t):
            n = cl.node(i)
            if n in m: # explicitly listed
                pm[n] = 1
                nl.append(n)
                continue
            for p in cl.parents(n):
                if p in pm: # parent listed
                    pm[n] = 1
                    nl.append(n)
                    break

        return nl
|
704 |
|
705 def findincoming(self, remote, base=None, heads=None): |
|
706 m = self.changelog.nodemap |
|
707 search = [] |
|
708 fetch = {} |
|
709 seen = {} |
|
710 seenbranch = {} |
|
711 if base == None: |
|
712 base = {} |
|
713 |
|
714 # assume we're closer to the tip than the root |
|
715 # and start by examining the heads |
|
716 self.ui.status("searching for changes\n") |
|
717 |
|
718 if not heads: |
|
719 heads = remote.heads() |
|
720 |
|
721 unknown = [] |
|
722 for h in heads: |
|
723 if h not in m: |
|
724 unknown.append(h) |
|
725 else: |
|
726 base[h] = 1 |
|
727 |
|
728 if not unknown: |
|
729 return None |
|
730 |
|
731 rep = {} |
|
732 reqcnt = 0 |
|
733 |
|
734 # search through remote branches |
|
735 # a 'branch' here is a linear segment of history, with four parts: |
|
736 # head, root, first parent, second parent |
|
737 # (a branch always has two parents (or none) by definition) |
|
738 unknown = remote.branches(unknown) |
|
739 while unknown: |
|
740 r = [] |
|
741 while unknown: |
|
742 n = unknown.pop(0) |
|
743 if n[0] in seen: |
|
744 continue |
|
745 |
|
746 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1]))) |
|
747 if n[0] == nullid: |
|
748 break |
|
749 if n in seenbranch: |
|
750 self.ui.debug("branch already found\n") |
|
751 continue |
|
752 if n[1] and n[1] in m: # do we know the base? |
|
753 self.ui.debug("found incomplete branch %s:%s\n" |
|
754 % (short(n[0]), short(n[1]))) |
|
755 search.append(n) # schedule branch range for scanning |
|
756 seenbranch[n] = 1 |
|
757 else: |
|
758 if n[1] not in seen and n[1] not in fetch: |
|
759 if n[2] in m and n[3] in m: |
|
760 self.ui.debug("found new changeset %s\n" % |
|
761 short(n[1])) |
|
762 fetch[n[1]] = 1 # earliest unknown |
|
763 base[n[2]] = 1 # latest known |
|
764 continue |
|
765 |
|
766 for a in n[2:4]: |
|
767 if a not in rep: |
|
768 r.append(a) |
|
769 rep[a] = 1 |
|
770 |
|
771 seen[n[0]] = 1 |
|
772 |
|
773 if r: |
|
774 reqcnt += 1 |
|
775 self.ui.debug("request %d: %s\n" % |
|
776 (reqcnt, " ".join(map(short, r)))) |
|
777 for p in range(0, len(r), 10): |
|
778 for b in remote.branches(r[p:p+10]): |
|
779 self.ui.debug("received %s:%s\n" % |
|
780 (short(b[0]), short(b[1]))) |
|
781 if b[0] in m: |
|
782 self.ui.debug("found base node %s\n" % short(b[0])) |
|
783 base[b[0]] = 1 |
|
784 elif b[0] not in seen: |
|
785 unknown.append(b) |
|
786 |
|
787 # do binary search on the branches we found |
|
788 while search: |
|
789 n = search.pop(0) |
|
790 reqcnt += 1 |
|
791 l = remote.between([(n[0], n[1])])[0] |
|
792 l.append(n[1]) |
|
793 p = n[0] |
|
794 f = 1 |
|
795 for i in l: |
|
796 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i))) |
|
797 if i in m: |
|
798 if f <= 2: |
|
799 self.ui.debug("found new branch changeset %s\n" % |
|
800 short(p)) |
|
801 fetch[p] = 1 |
|
802 base[i] = 1 |
|
803 else: |
|
804 self.ui.debug("narrowed branch search to %s:%s\n" |
|
805 % (short(p), short(i))) |
|
806 search.append((p, i)) |
|
807 break |
|
808 p, f = i, f * 2 |
|
809 |
|
810 # sanity check our fetch list |
|
811 for f in fetch.keys(): |
|
812 if f in m: |
|
813 raise RepoError("already have changeset " + short(f[:4])) |
|
814 |
|
815 if base.keys() == [nullid]: |
|
816 self.ui.warn("warning: pulling from an unrelated repository!\n") |
|
817 |
|
818 self.ui.note("found new changesets starting at " + |
|
819 " ".join([short(f) for f in fetch]) + "\n") |
|
820 |
|
821 self.ui.debug("%d total queries\n" % reqcnt) |
|
822 |
|
823 return fetch.keys() |
|
824 |
|
    def findoutgoing(self, remote, base=None, heads=None):
        """Return the roots of the changeset subtree the remote lacks.

        When base is not supplied, findincoming() is run first to
        discover the common nodes.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset
|
854 |
|
    def pull(self, remote):
        """Fetch from remote every changeset we don't have yet.

        Returns 1 when there is nothing to pull, otherwise the result
        of addchangegroup().
        """
        lock = self.lock()  # held (by refcount) until this frame exits

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status("requesting all changes\n")
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote)

        if not fetch:
            self.ui.status("no changes found\n")
            return 1

        cg = remote.changegroup(fetch)
        return self.addchangegroup(cg)
|
871 |
|
    def push(self, remote, force=False):
        """Push local changesets to remote.

        Unless force is set, refuses to push when the remote has
        unsynced changes or when the push would create new remote
        heads.  Returns 1 on refusal/no-op, otherwise the remote's
        addchangegroup() result.
        """
        lock = remote.lock()  # held (by refcount) until this frame exits

        base = {}
        heads = remote.heads()
        inc = self.findincoming(remote, base, heads)
        if not force and inc:
            self.ui.warn("abort: unsynced remote changes!\n")
            self.ui.status("(did you forget to sync? use push -f to force)\n")
            return 1

        update = self.findoutgoing(remote, base)
        if not update:
            self.ui.status("no changes found\n")
            return 1
        elif not force:
            # more local heads than remote heads means new remote branches
            if len(heads) < len(self.changelog.heads()):
                self.ui.warn("abort: push creates new remote branches!\n")
                self.ui.status("(did you forget to merge?" +
                               " use push -f to force)\n")
                return 1

        cg = self.changegroup(update)
        return remote.addchangegroup(cg)
|
896 |
|
    def changegroup(self, basenodes):
        """Return a file-like object streaming a changegroup with
        everything newer than basenodes: changelog group, manifest
        group, then one length-prefixed-name + group per changed file,
        terminated by a zero-length chunk."""
        class genread:
            # adapt a generator of strings to a read(l) interface
            def __init__(self, generator):
                self.g = generator
                self.buf = ""
            def fillbuf(self):
                self.buf += "".join(self.g)

            def read(self, l):
                while l > len(self.buf):
                    try:
                        self.buf += self.g.next()
                    except StopIteration:
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        def gengroup():
            nodes = self.newer(basenodes)

            # construct the link map
            linkmap = {}
            for n in nodes:
                linkmap[self.changelog.rev(n)] = n

            # construct a list of all changed files
            changed = {}
            for n in nodes:
                c = self.changelog.read(n)
                for f in c[3]:
                    changed[f] = 1
            changed = changed.keys()
            changed.sort()

            # the changegroup is changesets + manifests + all file revs
            revs = [ self.changelog.rev(n) for n in nodes ]

            for y in self.changelog.group(linkmap): yield y
            for y in self.manifest.group(linkmap): yield y
            # each file section starts with a length-prefixed filename
            for f in changed:
                yield struct.pack(">l", len(f) + 4) + f
                g = self.file(f).group(linkmap)
                for y in g:
                    yield y

            # a zero-length chunk marks the end of the stream
            yield struct.pack(">l", 0)

        return genread(gengroup())
|
945 |
|
    def addchangegroup(self, source):
        """Apply a changegroup stream (as produced by changegroup())
        to this repository.  Returns 1 when the changegroup hook
        vetoes, None otherwise."""

        def getchunk():
            # chunks are length-prefixed; a length <= 4 ends a section
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            return source.read(l - 4)

        def getgroup():
            # yield chunks until the end-of-section marker
            while 1:
                c = getchunk()
                if not c: break
                yield c

        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return self.changelog.count()

        def revmap(x):
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        # remember the head count so we can report new heads afterwards
        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status("adding changesets\n")
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        changesets = self.changelog.rev(cn) - self.changelog.rev(co)

        # pull off the manifest group
        self.ui.status("adding manifests\n")
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status("adding file changes\n")
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = " (+%d heads)" % (newheads - oldheads)

        self.ui.status(("added %d changesets" +
                        " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        tr.close()

        if not self.hook("changegroup"):
            return 1

        return
|
1013 |
|
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """update the working directory to changeset `node`

        allow       - permit an update that crosses branches (merge)
        force       - clobber local changes / outstanding merges
        choose      - optional predicate limiting which files are touched
        moddirstate - when False, leave the dirstate untouched (used for
                      partial/preview updates)

        Returns 1 on abort/failure, None on success.
        """
        pl = self.dirstate.parents()
        # refuse to update while an uncommitted merge is pending,
        # unless the caller forces it
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        # p1: current working dir parent, p2: update target
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        # manifests (file -> node) and exec-flag maps for local (1),
        # remote (2) and their common ancestor (a)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        # working-dir status: (changed, added, deleted, unknown)
        (c, a, d, u) = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}   # files needing a 3-way merge: f -> (my, other, mode)
        get = {}     # files to fetch from the target: f -> node
        remove = []  # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        # overlay added/changed/unknown files; "" marks "working-dir
        # version, contents not yet hashed"
        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                # s flags that this file has already been scheduled
                # (merge or get), so the exec-bit fixup below is skipped
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    # NOTE: rebinds loop-local `a` (was the added list);
                    # the added list is not used past this point
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                # contents agree, but the exec bit may still differ
                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # same exec-bit merge rule as above
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                # handled: remove from m2 so the second pass below only
                # sees files the working dir lacks
                del m2[f]
            elif f in ma:
                # file exists locally and in the ancestor, but the
                # remote side deleted it
                if n != ma[f]:
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                # file exists only on the local side
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # second pass: files present in the target but not in the
        # working dir (everything handled above was deleted from m2)
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                # remote changed a file the local side deleted
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        # forcing converts every would-be merge into a plain fetch of
        # the remote version
        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                # cross-branch update without -m/-C: report and abort
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            branch_merge = True

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wwrite(f, t)
            except IOError:
                # missing parent directory: create it and retry once
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    # st_mtime=-1 forces a later content comparison
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(self.wjoin(f))
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(self.wjoin(f)))
            except: pass
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)
|
1247 |
|
1248 def merge3(self, fn, my, other): |
|
1249 """perform a 3-way merge in the working directory""" |
|
1250 |
|
1251 def temp(prefix, node): |
|
1252 pre = "%s~%s." % (os.path.basename(fn), prefix) |
|
1253 (fd, name) = tempfile.mkstemp("", pre) |
|
1254 f = os.fdopen(fd, "wb") |
|
1255 self.wwrite(fn, fl.read(node), f) |
|
1256 f.close() |
|
1257 return name |
|
1258 |
|
1259 fl = self.file(fn) |
|
1260 base = fl.ancestor(my, other) |
|
1261 a = self.wjoin(fn) |
|
1262 b = temp("base", base) |
|
1263 c = temp("other", other) |
|
1264 |
|
1265 self.ui.note("resolving %s\n" % fn) |
|
1266 self.ui.debug("file %s: other %s ancestor %s\n" % |
|
1267 (fn, short(other), short(base))) |
|
1268 |
|
1269 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge") |
|
1270 or "hgmerge") |
|
1271 r = os.system("%s %s %s %s" % (cmd, a, b, c)) |
|
1272 if r: |
|
1273 self.ui.warn("merging %s failed!\n" % fn) |
|
1274 |
|
1275 os.unlink(b) |
|
1276 os.unlink(c) |
|
1277 |
|
1278 def verify(self): |
|
1279 filelinkrevs = {} |
|
1280 filenodes = {} |
|
1281 changesets = revisions = files = 0 |
|
1282 errors = 0 |
|
1283 |
|
1284 seen = {} |
|
1285 self.ui.status("checking changesets\n") |
|
1286 for i in range(self.changelog.count()): |
|
1287 changesets += 1 |
|
1288 n = self.changelog.node(i) |
|
1289 if n in seen: |
|
1290 self.ui.warn("duplicate changeset at revision %d\n" % i) |
|
1291 errors += 1 |
|
1292 seen[n] = 1 |
|
1293 |
|
1294 for p in self.changelog.parents(n): |
|
1295 if p not in self.changelog.nodemap: |
|
1296 self.ui.warn("changeset %s has unknown parent %s\n" % |
|
1297 (short(n), short(p))) |
|
1298 errors += 1 |
|
1299 try: |
|
1300 changes = self.changelog.read(n) |
|
1301 except Exception, inst: |
|
1302 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst)) |
|
1303 errors += 1 |
|
1304 |
|
1305 for f in changes[3]: |
|
1306 filelinkrevs.setdefault(f, []).append(i) |
|
1307 |
|
1308 seen = {} |
|
1309 self.ui.status("checking manifests\n") |
|
1310 for i in range(self.manifest.count()): |
|
1311 n = self.manifest.node(i) |
|
1312 if n in seen: |
|
1313 self.ui.warn("duplicate manifest at revision %d\n" % i) |
|
1314 errors += 1 |
|
1315 seen[n] = 1 |
|
1316 |
|
1317 for p in self.manifest.parents(n): |
|
1318 if p not in self.manifest.nodemap: |
|
1319 self.ui.warn("manifest %s has unknown parent %s\n" % |
|
1320 (short(n), short(p))) |
|
1321 errors += 1 |
|
1322 |
|
1323 try: |
|
1324 delta = mdiff.patchtext(self.manifest.delta(n)) |
|
1325 except KeyboardInterrupt: |
|
1326 self.ui.warn("aborted") |
|
1327 sys.exit(0) |
|
1328 except Exception, inst: |
|
1329 self.ui.warn("unpacking manifest %s: %s\n" |
|
1330 % (short(n), inst)) |
|
1331 errors += 1 |
|
1332 |
|
1333 ff = [ l.split('\0') for l in delta.splitlines() ] |
|
1334 for f, fn in ff: |
|
1335 filenodes.setdefault(f, {})[bin(fn[:40])] = 1 |
|
1336 |
|
1337 self.ui.status("crosschecking files in changesets and manifests\n") |
|
1338 for f in filenodes: |
|
1339 if f not in filelinkrevs: |
|
1340 self.ui.warn("file %s in manifest but not in changesets\n" % f) |
|
1341 errors += 1 |
|
1342 |
|
1343 for f in filelinkrevs: |
|
1344 if f not in filenodes: |
|
1345 self.ui.warn("file %s in changeset but not in manifest\n" % f) |
|
1346 errors += 1 |
|
1347 |
|
1348 self.ui.status("checking files\n") |
|
1349 ff = filenodes.keys() |
|
1350 ff.sort() |
|
1351 for f in ff: |
|
1352 if f == "/dev/null": continue |
|
1353 files += 1 |
|
1354 fl = self.file(f) |
|
1355 nodes = { nullid: 1 } |
|
1356 seen = {} |
|
1357 for i in range(fl.count()): |
|
1358 revisions += 1 |
|
1359 n = fl.node(i) |
|
1360 |
|
1361 if n in seen: |
|
1362 self.ui.warn("%s: duplicate revision %d\n" % (f, i)) |
|
1363 errors += 1 |
|
1364 |
|
1365 if n not in filenodes[f]: |
|
1366 self.ui.warn("%s: %d:%s not in manifests\n" |
|
1367 % (f, i, short(n))) |
|
1368 errors += 1 |
|
1369 else: |
|
1370 del filenodes[f][n] |
|
1371 |
|
1372 flr = fl.linkrev(n) |
|
1373 if flr not in filelinkrevs[f]: |
|
1374 self.ui.warn("%s:%s points to unexpected changeset %d\n" |
|
1375 % (f, short(n), fl.linkrev(n))) |
|
1376 errors += 1 |
|
1377 else: |
|
1378 filelinkrevs[f].remove(flr) |
|
1379 |
|
1380 # verify contents |
|
1381 try: |
|
1382 t = fl.read(n) |
|
1383 except Exception, inst: |
|
1384 self.ui.warn("unpacking file %s %s: %s\n" |
|
1385 % (f, short(n), inst)) |
|
1386 errors += 1 |
|
1387 |
|
1388 # verify parents |
|
1389 (p1, p2) = fl.parents(n) |
|
1390 if p1 not in nodes: |
|
1391 self.ui.warn("file %s:%s unknown parent 1 %s" % |
|
1392 (f, short(n), short(p1))) |
|
1393 errors += 1 |
|
1394 if p2 not in nodes: |
|
1395 self.ui.warn("file %s:%s unknown parent 2 %s" % |
|
1396 (f, short(n), short(p1))) |
|
1397 errors += 1 |
|
1398 nodes[n] = 1 |
|
1399 |
|
1400 # cross-check |
|
1401 for node in filenodes[f]: |
|
1402 self.ui.warn("node %s in manifests not in %s\n" |
|
1403 % (hex(node), f)) |
|
1404 errors += 1 |
|
1405 |
|
1406 self.ui.status("%d files, %d changesets, %d total revisions\n" % |
|
1407 (files, changesets, revisions)) |
|
1408 |
|
1409 if errors: |
|
1410 self.ui.warn("%d integrity errors encountered!\n" % errors) |
|
1411 return 1 |