From 2fbf296fda205446eac11a13abd4fcdb182f28d9 Mon Sep 17 00:00:00 2001
From: "Shawn O. Pearce" <spearce@spearce.org>
Date: Wed, 16 Nov 2011 15:04:44 -0800
Subject: [PATCH] Fix duplicate objects in "thin+cached" packs from DFS

The DfsReader must offer every representation of an object that exists
in the local repository when PackWriter asks for them. This is
necessary to identify objects in the thin pack part that are also in
the cached pack that will be appended onto the end of the stream.

Without looking at all alternatives, PackWriter may pack the same
object twice (once in the thin section, again in the cached base pack).
This may cause the command line C version of Git to go into an infinite
loop when repacking the resulting repository, as it may see a delta
chain cycle with one of those duplicate copies of the object.

Previously the DfsReader tried to avoid looking at packs that it might
not care about, but this is insufficient, as every representation must
be considered during pack generation.

Change-Id: Ibf4a3e8ea5c42aef16404ffc42a5781edd97b18e
---
 .../eclipse/jgit/storage/dfs/DfsReader.java | 43 ++++++++-----------
 1 file changed, 18 insertions(+), 25 deletions(-)

diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/dfs/DfsReader.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/dfs/DfsReader.java
index 0772278b1..621480bc5 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/dfs/DfsReader.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/dfs/DfsReader.java
@@ -444,38 +444,31 @@ final class DfsReader extends ObjectReader implements ObjectReuseAsIs {
 			return;
 		}
 
-		int packIndex = 0;
-		DfsPackFile packLast = packList[packIndex];
-
+		int objectCount = 0;
 		int updated = 0;
 		int posted = 0;
 		List<DfsObjectRepresentation> all = new BlockList<DfsObjectRepresentation>();
 		for (ObjectToPack otp : objects) {
-			long p = packLast.findOffset(this, otp);
-			if (p < 0) {
-				int skip = packIndex;
-				for (packIndex = 0; packIndex < packList.length; packIndex++) {
-					if (skip == packIndex)
-						continue;
-					packLast = packList[packIndex];
-					p = packLast.findOffset(this, otp);
-					if (0 < p)
-						break;
+			boolean found = false;
+			for (int packIndex = 0; packIndex < packList.length; packIndex++) {
+				DfsPackFile pack = packList[packIndex];
+				long p = pack.findOffset(this, otp);
+				if (0 < p) {
+					DfsObjectRepresentation r = new DfsObjectRepresentation(otp);
+					r.pack = pack;
+					r.packIndex = packIndex;
+					r.offset = p;
+					all.add(r);
+					found = true;
 				}
-				if (packIndex == packList.length)
-					throw new MissingObjectException(otp, otp.getType());
 			}
-
-			DfsObjectRepresentation r = new DfsObjectRepresentation(otp);
-			r.pack = packLast;
-			r.packIndex = packIndex;
-			r.offset = p;
-			all.add(r);
-
+			if (!found)
+				throw new MissingObjectException(otp, otp.getType());
 			if ((++updated & 1) == 1) {
 				monitor.update(1); // Update by 50%, the other 50% is below.
 				posted++;
 			}
+			objectCount++;
 		}
 		Collections.sort(all, REPRESENTATION_SORT);
 
@@ -484,7 +477,7 @@ final class DfsReader extends ObjectReader implements ObjectReuseAsIs {
 			for (DfsObjectRepresentation r : all) {
 				r.pack.representation(this, r);
 				packer.select(r.object, r);
-				if ((++updated & 1) == 1) {
+				if ((++updated & 1) == 1 && posted < objectCount) {
 					monitor.update(1);
 					posted++;
 				}
@@ -492,8 +485,8 @@ final class DfsReader extends ObjectReader implements ObjectReuseAsIs {
 		} finally {
 			cancelReadAhead();
 		}
-		if (posted < all.size())
-			monitor.update(all.size() - posted);
+		if (posted < objectCount)
+			monitor.update(objectCount - posted);
 	}
 
 	public void copyObjectAsIs(PackOutputStream out, ObjectToPack otp,
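
For context only, and not part of the patch itself: the toy Java program below
models why offering every representation matters when a cached base pack is
appended after a thin pack. It is a minimal sketch under the assumptions of the
commit message above; ThinPlusCachedSketch, Repr, thinSection and the pack
names are hypothetical, not JGit API.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class ThinPlusCachedSketch {
	// One representation of an object: the pack that holds it, and whether that
	// pack is the cached base pack appended verbatim to the end of the stream.
	static final class Repr {
		final String pack;
		final boolean inCachedBase;

		Repr(String pack, boolean inCachedBase) {
			this.pack = pack;
			this.inCachedBase = inCachedBase;
		}
	}

	// An object may be left out of the thin section only if the writer knows
	// that at least one of its representations lives in the cached base pack.
	static Set<String> thinSection(Map<String, List<Repr>> reprsByObject) {
		Set<String> thin = new LinkedHashSet<String>();
		for (Map.Entry<String, List<Repr>> e : reprsByObject.entrySet()) {
			boolean inBase = false;
			for (Repr r : e.getValue())
				if (r.inCachedBase)
					inBase = true;
			if (!inBase)
				thin.add(e.getKey());
		}
		return thin;
	}

	public static void main(String[] args) {
		// Object "b" exists in a recent pack and also in the cached base pack.
		Map<String, List<Repr>> all = new LinkedHashMap<String, List<Repr>>();
		all.put("a", reprs(new Repr("pack-recent", false)));
		all.put("b", reprs(new Repr("pack-recent", false),
				new Repr("pack-base", true)));

		// Reader offers every representation: "b" is omitted from the thin
		// section and appears only once, inside the appended base pack.
		System.out.println("all representations -> thin = " + thinSection(all));

		// Reader offers only the first representation it finds (a model of the
		// old behaviour): "b" lands in the thin section AND in the base pack,
		// producing the duplicate this patch removes.
		Map<String, List<Repr>> firstOnly = new LinkedHashMap<String, List<Repr>>();
		for (Map.Entry<String, List<Repr>> e : all.entrySet())
			firstOnly.put(e.getKey(), reprs(e.getValue().get(0)));
		System.out.println("first representation only -> thin = "
				+ thinSection(firstOnly));
	}

	static List<Repr> reprs(Repr... r) {
		List<Repr> list = new ArrayList<Repr>();
		for (Repr x : r)
			list.add(x);
		return list;
	}
}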