changeset 5449:17a4b20eda7b

chunkiter: handle large reads more efficiently

- for large reads, don't attempt to read more than necessary
- if we've gathered the exact number of bytes needed, avoid a string copy
author Matt Mackall <mpm@selenic.com>
date Thu, 11 Oct 2007 00:46:52 -0500
parents e038738714fd
children c728424d44c6
files mercurial/util.py
diffstat 1 files changed, 5 insertions(+), 2 deletions(-)
--- a/mercurial/util.py
+++ b/mercurial/util.py
@@ -1408,7 +1408,7 @@ class chunkbuffer(object):
         Returns less than L bytes if the iterator runs dry."""
         if l > len(self.buf) and self.iter:
             # Clamp to a multiple of self.targetsize
-            targetsize = self.targetsize * ((l // self.targetsize) + 1)
+            targetsize = max(l, self.targetsize)
             collector = cStringIO.StringIO()
             collector.write(self.buf)
             collected = len(self.buf)
@@ -1420,7 +1420,10 @@ class chunkbuffer(object):
             if collected < targetsize:
                 self.iter = False
             self.buf = collector.getvalue()
-        s, self.buf = self.buf[:l], buffer(self.buf, l)
+        if len(self.buf) == l:
+            s, self.buf = self.buf, ''
+        else:
+            s, self.buf = self.buf[:l], buffer(self.buf, l)
         return s
 
 def filechunkiter(f, size=65536, limit=None):
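
For illustration only, here is a minimal Python 3 re-creation of chunkbuffer.read with both optimizations applied; it is a sketch, not the changeset's code. io.BytesIO and memoryview stand in for the era's cStringIO and buffer(), the ChunkBuffer name and the example values are hypothetical, and the chunk-collecting loop (which falls between the two hunks above) is reconstructed from the surrounding context. Note that the unchanged comment "Clamp to a multiple of self.targetsize" describes the old rounding behavior; the new line simply takes max(l, self.targetsize).

import io

class ChunkBuffer:
    """Sketch of mercurial.util.chunkbuffer with this changeset applied."""

    def __init__(self, in_iter, targetsize=2 ** 16):
        self.iter = iter(in_iter)   # source of byte chunks
        self.buf = b''              # leftover bytes from earlier reads
        self.targetsize = targetsize

    def read(self, l):
        """Read l bytes; returns fewer if the iterator runs dry."""
        if l > len(self.buf) and self.iter:
            # Old code rounded l up to the next multiple of targetsize,
            # over-reading; now collect only what the caller asked for
            # (but never less than one target-sized gulp).
            targetsize = max(l, self.targetsize)
            collector = io.BytesIO()
            collector.write(self.buf)
            collected = len(self.buf)
            for chunk in self.iter:          # reconstructed loop
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                self.iter = False            # source exhausted
            self.buf = collector.getvalue()
        if len(self.buf) == l:
            # Exact fit: hand the buffer back whole instead of taking
            # self.buf[:l], which would copy all l bytes.
            s, self.buf = self.buf, b''
        else:
            # memoryview, like Py2's buffer(), keeps the tail zero-copy;
            # callers receive a bytes-like object (bytes or memoryview).
            s, self.buf = self.buf[:l], memoryview(self.buf)[l:]
        return s

# With 6-byte chunks and targetsize=8, read(12) collects exactly 12
# bytes and returns the buffer without slicing; the old formula would
# have rounded the target up to 16 and pulled a third chunk.
cb = ChunkBuffer(iter([b'abcdef', b'ghijkl', b'mnopqr']), targetsize=8)
assert bytes(cb.read(12)) == b'abcdefghijkl'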