Browse Source

Fix duplicate objects in "thin+cached" packs from DFS

The DfsReader must offer every representation of an object that
exists on the local repository when PackWriter asks for them. This
is necessary to identify objects in the thin pack part that are also
in the cached pack that will be appended onto the end of the stream.

Without looking at all alternatives, PackWriter may pack the same
object twice (once in the thin section, again in the cached base
pack). This may cause the command-line C version of Git to go into an
infinite loop when repacking the resulting repository, as it may see
a delta chain cycle with one of those duplicate copies of the object.

Previously the DfsReader tried to avoid looking at packs that it
might not care about, but this is insufficient, as all versions
must be considered during pack generation.

Change-Id: Ibf4a3e8ea5c42aef16404ffc42a5781edd97b18e
stable-1.2
Shawn O. Pearce 13 years ago
parent
commit
2fbf296fda
  1. 39
      org.eclipse.jgit/src/org/eclipse/jgit/storage/dfs/DfsReader.java

39
org.eclipse.jgit/src/org/eclipse/jgit/storage/dfs/DfsReader.java

@ -444,38 +444,31 @@ final class DfsReader extends ObjectReader implements ObjectReuseAsIs {
return; return;
} }
int packIndex = 0; int objectCount = 0;
DfsPackFile packLast = packList[packIndex];
int updated = 0; int updated = 0;
int posted = 0; int posted = 0;
List<DfsObjectRepresentation> all = new BlockList<DfsObjectRepresentation>(); List<DfsObjectRepresentation> all = new BlockList<DfsObjectRepresentation>();
for (ObjectToPack otp : objects) { for (ObjectToPack otp : objects) {
long p = packLast.findOffset(this, otp); boolean found = false;
if (p < 0) { for (int packIndex = 0; packIndex < packList.length; packIndex++) {
int skip = packIndex; DfsPackFile pack = packList[packIndex];
for (packIndex = 0; packIndex < packList.length; packIndex++) { long p = pack.findOffset(this, otp);
if (skip == packIndex) if (0 < p) {
continue;
packLast = packList[packIndex];
p = packLast.findOffset(this, otp);
if (0 < p)
break;
}
if (packIndex == packList.length)
throw new MissingObjectException(otp, otp.getType());
}
DfsObjectRepresentation r = new DfsObjectRepresentation(otp); DfsObjectRepresentation r = new DfsObjectRepresentation(otp);
r.pack = packLast; r.pack = pack;
r.packIndex = packIndex; r.packIndex = packIndex;
r.offset = p; r.offset = p;
all.add(r); all.add(r);
found = true;
}
}
if (!found)
throw new MissingObjectException(otp, otp.getType());
if ((++updated & 1) == 1) { if ((++updated & 1) == 1) {
monitor.update(1); // Update by 50%, the other 50% is below. monitor.update(1); // Update by 50%, the other 50% is below.
posted++; posted++;
} }
objectCount++;
} }
Collections.sort(all, REPRESENTATION_SORT); Collections.sort(all, REPRESENTATION_SORT);
@ -484,7 +477,7 @@ final class DfsReader extends ObjectReader implements ObjectReuseAsIs {
for (DfsObjectRepresentation r : all) { for (DfsObjectRepresentation r : all) {
r.pack.representation(this, r); r.pack.representation(this, r);
packer.select(r.object, r); packer.select(r.object, r);
if ((++updated & 1) == 1) { if ((++updated & 1) == 1 && posted < objectCount) {
monitor.update(1); monitor.update(1);
posted++; posted++;
} }
@ -492,8 +485,8 @@ final class DfsReader extends ObjectReader implements ObjectReuseAsIs {
} finally { } finally {
cancelReadAhead(); cancelReadAhead();
} }
if (posted < all.size()) if (posted < objectCount)
monitor.update(all.size() - posted); monitor.update(objectCount - posted);
} }
public void copyObjectAsIs(PackOutputStream out, ObjectToPack otp, public void copyObjectAsIs(PackOutputStream out, ObjectToPack otp,

Loading…
Cancel
Save