
dfs: Fix DataFormatException: 0 bytes to inflate

When a file uses a different block size (e.g. 500) than the cache
(e.g. 512), and the DfsPackFile's blockSize field has not been
initialized, the cache misaligns block loads. The cache uses its
default of 512 to compute the block alignment instead of the file's
500.

This causes DfsReader to try to set an empty range into an Inflater,
resulting in an object that cannot be loaded.
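
To make the misalignment concrete, the following standalone sketch walks through the arithmetic (the helper method, class name, and the example offset 1023 are illustrative assumptions, not JGit code; only the 500 and 512 byte block sizes come from this commit):

// Illustrative only: aligning a requested offset with the cache's default
// block size (512) instead of the file's real block size (500) can select
// a block that ends before the requested offset, leaving nothing to inflate.
public class BlockAlignmentSketch {
	// Round a position down to the start of its containing block.
	static long alignToBlock(long pos, int blockSize) {
		return (pos / blockSize) * blockSize;
	}

	public static void main(String[] args) {
		long requested = 1023;   // hypothetical offset of object data in the pack
		int fileBlockSize = 500; // block size the pack file was written with
		int cacheDefault = 512;  // cache default used while file.blockSize is still 0

		// Correct alignment: block [1000, 1500) contains offset 1023.
		long goodStart = alignToBlock(requested, fileBlockSize);

		// Misaligned: start 512, but a 500-byte block [512, 1012) ends before 1023.
		long badStart = alignToBlock(requested, cacheDefault);
		long badEnd = badStart + fileBlockSize;

		System.out.println("aligned with 500: [" + goodStart + ", " + (goodStart + fileBlockSize) + ")");
		System.out.println("aligned with 512: [" + badStart + ", " + badEnd + ")");
		// 0 bytes remain for the requested offset, matching the reported exception.
		System.out.println("bytes available at " + requested + ": " + Math.max(0, badEnd - requested));
	}
}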

Change-Id: I7d6352708225f62ef2f216d1ddcbaa64be113df6

Branch: stable-4.9
Author: Shawn Pearce, 8 years ago
Commit: f414f7de1f
  1. org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java (47 changed lines)
  2. org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlockCache.java (4 changed lines)
  3. org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/InMemoryRepository.java (26 changed lines)

org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java (47 changed lines)

@@ -69,10 +69,7 @@ public class DfsBlockCacheTest {
 	@Before
 	public void setUp() {
 		rng = new TestRng(testName.getMethodName());
-		DfsBlockCache.reconfigure(new DfsBlockCacheConfig()
-				.setBlockSize(512)
-				.setBlockLimit(1 << 20));
-		cache = DfsBlockCache.getInstance();
+		resetCache();
 	}

 	@Test
@@ -100,4 +97,46 @@ public class DfsBlockCacheTest {
 		assertEquals(0, cache.getMissCount());
 		assertEquals(oldSize, cache.getCurrentSize());
 	}
+
+	@Test
+	public void weirdBlockSize() throws Exception {
+		DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
+		InMemoryRepository r1 = new InMemoryRepository(repo);
+
+		byte[] content1 = rng.nextBytes(4);
+		byte[] content2 = rng.nextBytes(424242);
+		ObjectId id1;
+		ObjectId id2;
+		try (ObjectInserter ins = r1.newObjectInserter()) {
+			id1 = ins.insert(OBJ_BLOB, content1);
+			id2 = ins.insert(OBJ_BLOB, content2);
+			ins.flush();
+		}
+
+		resetCache();
+		List<DfsPackDescription> packs = r1.getObjectDatabase().listPacks();
+
+		InMemoryRepository r2 = new InMemoryRepository(repo);
+		r2.getObjectDatabase().setReadableChannelBlockSizeForTest(500);
+		r2.getObjectDatabase().commitPack(packs, Collections.emptyList());
+		try (ObjectReader rdr = r2.newObjectReader()) {
+			byte[] actual = rdr.open(id1, OBJ_BLOB).getBytes();
+			assertTrue(Arrays.equals(content1, actual));
+		}
+
+		InMemoryRepository r3 = new InMemoryRepository(repo);
+		r3.getObjectDatabase().setReadableChannelBlockSizeForTest(500);
+		r3.getObjectDatabase().commitPack(packs, Collections.emptyList());
+		try (ObjectReader rdr = r3.newObjectReader()) {
+			byte[] actual = rdr.open(id2, OBJ_BLOB).getBytes();
+			assertTrue(Arrays.equals(content2, actual));
+		}
+	}
+
+	private void resetCache() {
+		DfsBlockCache.reconfigure(new DfsBlockCacheConfig()
+				.setBlockSize(512)
+				.setBlockLimit(1 << 20));
+		cache = DfsBlockCache.getInstance();
+	}
 }

org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlockCache.java (4 changed lines)

@@ -274,7 +274,7 @@ public final class DfsBlockCache {
 		int slot = slot(key, position);
 		HashEntry e1 = table.get(slot);
 		DfsBlock v = scan(e1, key, position);
-		if (v != null) {
+		if (v != null && v.contains(key, requestedPosition)) {
 			ctx.stats.blockCacheHit++;
 			statHit.incrementAndGet();
 			return v;
@@ -298,7 +298,7 @@
 		statMiss.incrementAndGet();
 		boolean credit = true;
 		try {
-			v = file.readOneBlock(position, ctx, fileChannel);
+			v = file.readOneBlock(requestedPosition, ctx, fileChannel);
 			credit = false;
 		} finally {
 			if (credit)
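
Taken together, the two hunks above stop the cache from trusting its own default alignment: a cached block only counts as a hit if it actually covers the originally requested offset, and on a miss the read starts from the requested offset so the file can align it with its own block size. A rough, hypothetical stand-in for the contains() guard used in the hit check (the real DfsBlock internals may differ):

// Hypothetical, simplified stand-in for the guard in the hit check above.
final class BlockRange {
	final String streamKey; // stand-in for the pack's DfsStreamKey
	final long start;       // first byte offset covered by this block
	final long end;         // one past the last covered byte offset

	BlockRange(String streamKey, long start, long end) {
		this.streamKey = streamKey;
		this.start = start;
		this.end = end;
	}

	// A cached block is usable only if it belongs to the same stream and its
	// range covers the offset the reader originally asked for, not just the
	// offset the cache computed with its default block size.
	boolean contains(String wantKey, long requestedPosition) {
		return streamKey.equals(wantKey)
				&& start <= requestedPosition
				&& requestedPosition < end;
	}
}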

org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/InMemoryRepository.java (26 changed lines)

@@ -53,7 +53,7 @@ public class InMemoryRepository extends DfsRepository {
 	static final AtomicInteger packId = new AtomicInteger();

-	private final DfsObjDatabase objdb;
+	private final MemObjDatabase objdb;
 	private final RefDatabase refdb;
 	private String gitwebDescription;
 	private boolean performsAtomicTransactions = true;
@@ -75,7 +75,7 @@ public class InMemoryRepository extends DfsRepository {
 	}

 	@Override
-	public DfsObjDatabase getObjectDatabase() {
+	public MemObjDatabase getObjectDatabase() {
 		return objdb;
 	}
@@ -106,13 +106,23 @@ public class InMemoryRepository extends DfsRepository {
 		gitwebDescription = d;
 	}

-	private class MemObjDatabase extends DfsObjDatabase {
+	/** DfsObjDatabase used by InMemoryRepository. */
+	public class MemObjDatabase extends DfsObjDatabase {
 		private List<DfsPackDescription> packs = new ArrayList<>();
+		private int blockSize;

 		MemObjDatabase(DfsRepository repo) {
 			super(repo, new DfsReaderOptions());
 		}

+		/**
+		 * @param blockSize
+		 *            force a different block size for testing.
+		 */
+		public void setReadableChannelBlockSizeForTest(int blockSize) {
+			this.blockSize = blockSize;
+		}
+
 		@Override
 		protected synchronized List<DfsPackDescription> listPacks() {
 			return packs;
@@ -152,7 +162,7 @@
 			byte[] file = memPack.fileMap.get(ext);
 			if (file == null)
 				throw new FileNotFoundException(desc.getFileName(ext));
-			return new ByteArrayReadableChannel(file);
+			return new ByteArrayReadableChannel(file, blockSize);
 		}

 		@Override
@@ -216,13 +226,13 @@
 	private static class ByteArrayReadableChannel implements ReadableChannel {
 		private final byte[] data;
+		private final int blockSize;
 		private int position;
 		private boolean open = true;

-		ByteArrayReadableChannel(byte[] buf) {
+		ByteArrayReadableChannel(byte[] buf, int blockSize) {
 			data = buf;
+			this.blockSize = blockSize;
 		}

 		@Override
@@ -262,7 +272,7 @@
 		@Override
 		public int blockSize() {
-			return 0;
+			return blockSize;
 		}

 		@Override
