
Break down DfsBlockCache stats by pack file extension.

Change-Id: Iaecf0580279b33e3e2439784528cae7b69fb28bc
Signed-off-by: Minh Thai <mthai@google.com>
Branch: stable-4.10
Author: Minh Thai
Commit: 159da6dacc
5 files changed:

  org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCacheTest.java (2 lines changed)
  org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java (9 lines changed)
  org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlockCache.java (202 lines changed)
  org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackDescription.java (3 lines changed)
  org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsStreamKey.java (28 lines changed)

--- a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCacheTest.java
+++ b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DeltaBaseCacheTest.java
@@ -64,7 +64,7 @@ public class DeltaBaseCacheTest {
     @Before
     public void setUp() {
         DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
-        key = DfsStreamKey.of(repo, "test.key");
+        key = DfsStreamKey.of(repo, "test.key", null);
         cache = new DeltaBaseCache(SZ);
         rng = new TestRng(getClass().getSimpleName());
     }

--- a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
+++ b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
@@ -50,6 +50,7 @@ import static org.junit.Assert.assertTrue;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import java.util.stream.LongStream;
 
 import org.eclipse.jgit.junit.TestRng;
 import org.eclipse.jgit.lib.ObjectId;
@@ -84,9 +85,9 @@ public class DfsBlockCacheTest {
             ins.flush();
         }
 
-        long oldSize = cache.getCurrentSize();
+        long oldSize = LongStream.of(cache.getCurrentSize()).sum();
         assertTrue(oldSize > 2000);
-        assertEquals(0, cache.getHitCount());
+        assertEquals(0, LongStream.of(cache.getHitCount()).sum());
 
         List<DfsPackDescription> packs = r1.getObjectDatabase().listPacks();
         InMemoryRepository r2 = new InMemoryRepository(repo);
@@ -95,8 +96,8 @@ public class DfsBlockCacheTest {
             byte[] actual = rdr.open(id, OBJ_BLOB).getBytes();
             assertTrue(Arrays.equals(content, actual));
         }
-        assertEquals(0, cache.getMissCount());
-        assertEquals(oldSize, cache.getCurrentSize());
+        assertEquals(0, LongStream.of(cache.getMissCount()).sum());
+        assertEquals(oldSize, LongStream.of(cache.getCurrentSize()).sum());
     }
 
     @SuppressWarnings("resource")
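The test changes above show the new API shape: the stats getters now return a long[] with one slot per pack file extension instead of a single long, so callers that want the old aggregate view sum the array. A minimal sketch of that pattern, assuming the shared cache reached through DfsBlockCache.getInstance() (the wrapper class name is illustrative):

    import java.util.stream.LongStream;

    import org.eclipse.jgit.internal.storage.dfs.DfsBlockCache;

    public class AggregateStats {
        public static void main(String[] args) {
            DfsBlockCache cache = DfsBlockCache.getInstance();
            // Each getter returns one counter slot per pack file extension;
            // summing the slots restores the pre-change scalar totals.
            long bytes = LongStream.of(cache.getCurrentSize()).sum();
            long hits = LongStream.of(cache.getHitCount()).sum();
            long misses = LongStream.of(cache.getMissCount()).sum();
            System.out.println("bytes=" + bytes + " hits=" + hits
                    + " misses=" + misses);
        }
    }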

--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlockCache.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlockCache.java
@@ -46,11 +46,14 @@ package org.eclipse.jgit.internal.storage.dfs;
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.atomic.AtomicReferenceArray;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.LongStream;
 
 import org.eclipse.jgit.annotations.Nullable;
 import org.eclipse.jgit.internal.JGitText;
+import org.eclipse.jgit.internal.storage.pack.PackExt;
 
 /**
  * Caches slices of a {@link BlockBasedFile} in memory for faster read access.
@@ -61,8 +64,8 @@ import org.eclipse.jgit.internal.JGitText;
  * these tiny reads into larger block-sized IO operations.
  * <p>
  * Whenever a cache miss occurs, loading is invoked by exactly one thread for
- * the given <code>(DfsPackKey,position)</code> key tuple. This is ensured by an
- * array of locks, with the tuple hashed to a lock instance.
+ * the given <code>(DfsStreamKey,position)</code> key tuple. This is ensured by
+ * an array of locks, with the tuple hashed to a lock instance.
  * <p>
  * Its too expensive during object access to be accurate with a least recently
  * used (LRU) algorithm. Strictly ordering every read is a lot of overhead that
@@ -143,14 +146,27 @@ public final class DfsBlockCache {
     /** As {@link #blockSize} is a power of 2, bits to shift for a / blockSize. */
     private final int blockSizeShift;
 
-    /** Number of times a block was found in the cache. */
-    private final AtomicLong statHit;
+    /**
+     * Number of times a block was found in the cache, per pack file extension.
+     */
+    private final AtomicReference<AtomicLong[]> statHit;
 
-    /** Number of times a block was not found, and had to be loaded. */
-    private final AtomicLong statMiss;
+    /**
+     * Number of times a block was not found, and had to be loaded, per pack
+     * file extension.
+     */
+    private final AtomicReference<AtomicLong[]> statMiss;
 
-    /** Number of blocks evicted due to cache being full. */
-    private volatile long statEvict;
+    /**
+     * Number of blocks evicted due to cache being full, per pack file
+     * extension.
+     */
+    private final AtomicReference<AtomicLong[]> statEvict;
+
+    /**
+     * Number of bytes currently loaded in the cache, per pack file extension.
+     */
+    private final AtomicReference<AtomicLong[]> liveBytes;
 
     /** Protects the clock and its related data. */
     private final ReentrantLock clockLock;
@@ -158,9 +174,6 @@ public final class DfsBlockCache {
     /** Current position of the clock. */
     private Ref clockHand;
 
-    /** Number of bytes currently loaded in the cache. */
-    private volatile long liveBytes;
-
     @SuppressWarnings("unchecked")
     private DfsBlockCache(final DfsBlockCacheConfig cfg) {
         tableSize = tableSize(cfg);
@@ -180,56 +193,90 @@
         clockLock = new ReentrantLock(true /* fair */);
         String none = ""; //$NON-NLS-1$
         clockHand = new Ref<>(
-                DfsStreamKey.of(new DfsRepositoryDescription(none), none),
+                DfsStreamKey.of(new DfsRepositoryDescription(none), none, null),
                 -1, 0, null);
         clockHand.next = clockHand;
-        statHit = new AtomicLong();
-        statMiss = new AtomicLong();
+
+        statHit = new AtomicReference<>(newCounters());
+        statMiss = new AtomicReference<>(newCounters());
+        statEvict = new AtomicReference<>(newCounters());
+        liveBytes = new AtomicReference<>(newCounters());
     }
 
     boolean shouldCopyThroughCache(long length) {
         return length <= maxStreamThroughCache;
     }
 
-    /** @return total number of bytes in the cache. */
-    public long getCurrentSize() {
-        return liveBytes;
+    /** @return total number of bytes in the cache, per pack file extension. */
+    public long[] getCurrentSize() {
+        return getStatVals(liveBytes);
     }
 
     /** @return 0..100, defining how full the cache is. */
    public long getFillPercentage() {
-        return getCurrentSize() * 100 / maxBytes;
+        return LongStream.of(getCurrentSize()).sum() * 100 / maxBytes;
    }
 
-    /** @return number of requests for items in the cache. */
-    public long getHitCount() {
-        return statHit.get();
+    /**
+     * @return number of requests for items in the cache, per pack file
+     *         extension.
+     */
+    public long[] getHitCount() {
+        return getStatVals(statHit);
    }
 
-    /** @return number of requests for items not in the cache. */
-    public long getMissCount() {
-        return statMiss.get();
+    /**
+     * @return number of requests for items not in the cache, per pack file
+     *         extension.
+     */
+    public long[] getMissCount() {
+        return getStatVals(statMiss);
    }
 
-    /** @return total number of requests (hit + miss). */
-    public long getTotalRequestCount() {
-        return getHitCount() + getMissCount();
+    /**
+     * @return total number of requests (hit + miss), per pack file extension.
+     */
+    public long[] getTotalRequestCount() {
+        AtomicLong[] hit = statHit.get();
+        AtomicLong[] miss = statMiss.get();
+        long[] cnt = new long[Math.max(hit.length, miss.length)];
+        for (int i = 0; i < hit.length; i++) {
+            cnt[i] += hit[i].get();
+        }
+        for (int i = 0; i < miss.length; i++) {
+            cnt[i] += miss[i].get();
+        }
+        return cnt;
    }
 
-    /** @return 0..100, defining number of cache hits. */
-    public long getHitRatio() {
-        long hits = statHit.get();
-        long miss = statMiss.get();
-        long total = hits + miss;
-        if (total == 0)
-            return 0;
-        return hits * 100 / total;
+    /**
+     * @return 0..100, defining number of cache hits, per pack file extension.
+     */
+    public long[] getHitRatio() {
+        AtomicLong[] hit = statHit.get();
+        AtomicLong[] miss = statMiss.get();
+        long[] ratio = new long[Math.max(hit.length, miss.length)];
+        for (int i = 0; i < ratio.length; i++) {
+            if (i >= hit.length) {
+                ratio[i] = 0;
+            } else if (i >= miss.length) {
+                ratio[i] = 100;
+            } else {
+                long hitVal = hit[i].get();
+                long missVal = miss[i].get();
+                long total = hitVal + missVal;
+                ratio[i] = total == 0 ? 0 : hitVal * 100 / total;
+            }
+        }
+        return ratio;
    }
 
-    /** @return number of evictions performed due to cache being full. */
-    public long getEvictions() {
-        return statEvict;
+    /**
+     * @return number of evictions performed due to cache being full, per pack
+     *         file extension.
+     */
+    public long[] getEvictions() {
+        return getStatVals(statEvict);
    }
 
     private int hash(int packHash, long off) {
@@ -276,11 +323,11 @@
             DfsBlock v = scan(e1, key, position);
             if (v != null && v.contains(key, requestedPosition)) {
                 ctx.stats.blockCacheHit++;
-                statHit.incrementAndGet();
+                getStat(statHit, key).incrementAndGet();
                 return v;
             }
 
-            reserveSpace(blockSize);
+            reserveSpace(blockSize, key);
             ReentrantLock regionLock = lockFor(key, position);
             regionLock.lock();
             try {
@@ -289,20 +336,20 @@
                 v = scan(e2, key, position);
                 if (v != null) {
                     ctx.stats.blockCacheHit++;
-                    statHit.incrementAndGet();
-                    creditSpace(blockSize);
+                    getStat(statHit, key).incrementAndGet();
+                    creditSpace(blockSize, key);
                     return v;
                 }
             }
 
-            statMiss.incrementAndGet();
+            getStat(statMiss, key).incrementAndGet();
             boolean credit = true;
             try {
                 v = file.readOneBlock(requestedPosition, ctx, fileChannel);
                 credit = false;
             } finally {
                 if (credit)
-                    creditSpace(blockSize);
+                    creditSpace(blockSize, key);
             }
             if (position != v.start) {
                 // The file discovered its blockSize and adjusted.
@@ -332,10 +379,10 @@
     }
 
     @SuppressWarnings("unchecked")
-    private void reserveSpace(int reserve) {
+    private void reserveSpace(int reserve, DfsStreamKey key) {
         clockLock.lock();
         try {
-            long live = liveBytes + reserve;
+            long live = LongStream.of(getCurrentSize()).sum() + reserve;
             if (maxBytes < live) {
                 Ref prev = clockHand;
                 Ref hand = clockHand.next;
@@ -358,19 +405,20 @@
                     dead.next = null;
                     dead.value = null;
                     live -= dead.size;
-                    statEvict++;
+                    getStat(liveBytes, dead.key).addAndGet(-dead.size);
+                    getStat(statEvict, dead.key).incrementAndGet();
                 } while (maxBytes < live);
                 clockHand = prev;
             }
-            liveBytes = live;
+            getStat(liveBytes, key).addAndGet(reserve);
         } finally {
             clockLock.unlock();
        }
    }
 
-    private void creditSpace(int credit) {
+    private void creditSpace(int credit, DfsStreamKey key) {
         clockLock.lock();
-        liveBytes -= credit;
+        getStat(liveBytes, key).addAndGet(-credit);
         clockLock.unlock();
    }
 
@@ -378,8 +426,9 @@
     private void addToClock(Ref ref, int credit) {
         clockLock.lock();
         try {
-            if (credit != 0)
-                liveBytes -= credit;
+            if (credit != 0) {
+                getStat(liveBytes, ref.key).addAndGet(-credit);
+            }
             Ref ptr = clockHand;
             ref.next = ptr.next;
             ptr.next = ref;
@@ -404,7 +453,7 @@
         if (ref != null)
             return ref;
 
-        reserveSpace(size);
+        reserveSpace(size, key);
         ReentrantLock regionLock = lockFor(key, pos);
         regionLock.lock();
         try {
@@ -412,7 +461,7 @@
             if (e2 != e1) {
                 ref = scanRef(e2, key, pos);
                 if (ref != null) {
-                    creditSpace(size);
+                    creditSpace(size, key);
                     return ref;
                 }
             }
@@ -440,9 +489,9 @@
     <T> T get(DfsStreamKey key, long position) {
         T val = (T) scan(table.get(slot(key, position)), key, position);
         if (val == null)
-            statMiss.incrementAndGet();
+            getStat(statMiss, key).incrementAndGet();
         else
-            statHit.incrementAndGet();
+            getStat(statHit, key).incrementAndGet();
         return val;
    }
 
@@ -454,9 +503,9 @@
     <T> Ref<T> getRef(DfsStreamKey key) {
         Ref<T> r = scanRef(table.get(slot(key, 0)), key, 0);
         if (r != null)
-            statHit.incrementAndGet();
+            getStat(statHit, key).incrementAndGet();
         else
-            statMiss.incrementAndGet();
+            getStat(statMiss, key).incrementAndGet();
         return r;
    }
 
@@ -478,6 +527,43 @@
         return loadLocks[(hash(key.hash, position) >>> 1) % loadLocks.length];
     }
 
+    private static AtomicLong[] newCounters() {
+        AtomicLong[] ret = new AtomicLong[PackExt.values().length];
+        for (int i = 0; i < ret.length; i++) {
+            ret[i] = new AtomicLong();
+        }
+        return ret;
+    }
+
+    private static AtomicLong getStat(AtomicReference<AtomicLong[]> stats,
+            DfsStreamKey key) {
+        int pos = key.packExtPos;
+        while (true) {
+            AtomicLong[] vals = stats.get();
+            if (pos < vals.length) {
+                return vals[pos];
+            }
+            AtomicLong[] expect = vals;
+            vals = new AtomicLong[Math.max(pos + 1, PackExt.values().length)];
+            System.arraycopy(expect, 0, vals, 0, expect.length);
+            for (int i = expect.length; i < vals.length; i++) {
+                vals[i] = new AtomicLong();
+            }
+            if (stats.compareAndSet(expect, vals)) {
+                return vals[pos];
+            }
+        }
+    }
+
+    private static long[] getStatVals(AtomicReference<AtomicLong[]> stat) {
+        AtomicLong[] stats = stat.get();
+        long[] cnt = new long[stats.length];
+        for (int i = 0; i < stats.length; i++) {
+            cnt[i] = stats[i].get();
+        }
+        return cnt;
+    }
+
     private static HashEntry clean(HashEntry top) {
         while (top != null && top.ref.next == null)
             top = top.next;
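Because the counters are kept per extension, callers can now attribute cache traffic to individual pack file types by indexing the returned arrays with PackExt.getPosition(). A hedged sketch of a per-extension report: it assumes the arrays are at least PackExt.values().length long (newCounters() sizes the initial arrays that way and getStat() only ever grows them), and note that keys built with a null extension are counted in slot 0 alongside PackExt.PACK. The wrapper class name is illustrative:

    import org.eclipse.jgit.internal.storage.dfs.DfsBlockCache;
    import org.eclipse.jgit.internal.storage.pack.PackExt;

    public class PerExtensionStats {
        public static void main(String[] args) {
            DfsBlockCache cache = DfsBlockCache.getInstance();
            long[] hit = cache.getHitCount();
            long[] miss = cache.getMissCount();
            long[] ratio = cache.getHitRatio();
            for (PackExt ext : PackExt.values()) {
                // Each PackExt owns a fixed position used as the array index.
                int pos = ext.getPosition();
                System.out.printf("%s: hit=%d miss=%d ratio=%d%%%n",
                        ext.getExtension(), hit[pos], miss[pos], ratio[pos]);
            }
        }
    }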

--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackDescription.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsPackDescription.java
@@ -143,7 +143,8 @@ public class DfsPackDescription implements Comparable<DfsPackDescription> {
      * @return cache key for use by the block cache.
      */
     public DfsStreamKey getStreamKey(PackExt ext) {
-        return DfsStreamKey.of(getRepositoryDescription(), getFileName(ext));
+        return DfsStreamKey.of(getRepositoryDescription(), getFileName(ext),
+                ext);
     }
 
     /** @return the source of the pack. */

--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsStreamKey.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsStreamKey.java
@@ -47,6 +47,9 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import java.util.Arrays;
 
+import org.eclipse.jgit.annotations.Nullable;
+import org.eclipse.jgit.internal.storage.pack.PackExt;
+
 /** Key used by {@link DfsBlockCache} to disambiguate streams. */
 public abstract class DfsStreamKey {
     /**
@@ -54,22 +57,30 @@ public abstract class DfsStreamKey {
      *            description of the containing repository.
      * @param name
      *            compute the key from a string name.
+     * @param ext
+     *            pack file extension, or {@code null}.
      * @return key for {@code name}
      */
-    public static DfsStreamKey of(DfsRepositoryDescription repo, String name) {
-        return new ByteArrayDfsStreamKey(repo, name.getBytes(UTF_8));
+    public static DfsStreamKey of(DfsRepositoryDescription repo, String name,
+            @Nullable PackExt ext) {
+        return new ByteArrayDfsStreamKey(repo, name.getBytes(UTF_8), ext);
     }
 
     final int hash;
 
+    final int packExtPos;
+
     /**
      * @param hash
      *            hash of the other identifying components of the key.
+     * @param ext
+     *            pack file extension, or {@code null}.
      */
-    protected DfsStreamKey(int hash) {
+    protected DfsStreamKey(int hash, @Nullable PackExt ext) {
         // Multiply by 31 here so we can more directly combine with another
         // value without doing the multiply there.
         this.hash = hash * 31;
+        this.packExtPos = ext == null ? 0 : ext.getPosition();
     }
 
     @Override
@@ -88,10 +99,12 @@ public abstract class DfsStreamKey {
     private static final class ByteArrayDfsStreamKey extends DfsStreamKey {
         private final DfsRepositoryDescription repo;
+
         private final byte[] name;
 
-        ByteArrayDfsStreamKey(DfsRepositoryDescription repo, byte[] name) {
-            super(repo.hashCode() * 31 + Arrays.hashCode(name));
+        ByteArrayDfsStreamKey(DfsRepositoryDescription repo, byte[] name,
+                @Nullable PackExt ext) {
+            super(repo.hashCode() * 31 + Arrays.hashCode(name), ext);
             this.repo = repo;
             this.name = name;
         }
@@ -100,8 +113,7 @@ public abstract class DfsStreamKey {
         public boolean equals(Object o) {
             if (o instanceof ByteArrayDfsStreamKey) {
                 ByteArrayDfsStreamKey k = (ByteArrayDfsStreamKey) o;
-                return hash == k.hash
-                        && repo.equals(k.repo)
+                return hash == k.hash && repo.equals(k.repo)
                         && Arrays.equals(name, k.name);
             }
             return false;
@@ -112,7 +124,7 @@ public abstract class DfsStreamKey {
         private final DfsStreamKey idxKey;
 
         ForReverseIndex(DfsStreamKey idxKey) {
-            super(idxKey.hash + 1);
+            super(idxKey.hash + 1, null);
             this.idxKey = idxKey;
         }
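With the extension threaded through key construction, each DfsStreamKey records a packExtPos that selects its stats slot; the extension deliberately stays out of hash and equals, so key identity is still repository plus file name. A short sketch of the new factory signature (class name and file names here are hypothetical):

    import org.eclipse.jgit.internal.storage.dfs.DfsRepositoryDescription;
    import org.eclipse.jgit.internal.storage.dfs.DfsStreamKey;
    import org.eclipse.jgit.internal.storage.pack.PackExt;

    public class KeyDemo {
        public static void main(String[] args) {
            DfsRepositoryDescription repo = new DfsRepositoryDescription("demo");
            // The extension routes this key's hits/misses to the matching
            // per-extension counter slot in DfsBlockCache.
            DfsStreamKey pack = DfsStreamKey.of(repo, "pack-1.pack", PackExt.PACK);
            DfsStreamKey index = DfsStreamKey.of(repo, "pack-1.idx", PackExt.INDEX);
            // null is allowed; such keys are counted in slot 0.
            DfsStreamKey other = DfsStreamKey.of(repo, "objects.bin", null);
            System.out.println(pack + " " + index + " " + other);
        }
    }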
