
file: implement FileReftableDatabase

Reftable is a binary, block-based storage format for the ref database.
It provides several advantages over the traditional packed + loose
storage format:

 * O(1) write performance, even for deletions and transactions.

 * atomic updates to the ref database (see the sketch below).

 * O(log N) lookup and prefix scans.

 * free from restrictions imposed by the file system: it is
   case-sensitive even on case-insensitive file systems, and has
   no inherent directory/file conflict limitations.

 * prefix compression reduces space usage for repetitive ref names,
   such as Gerrit's refs/changes/xx/xxxxx format.
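
For illustration only (not part of this change), a minimal sketch of
what the atomicity guarantee means for callers, using the existing
public BatchRefUpdate API; the class name AtomicUpdateSketch and the
branch names are invented for the example:

import java.io.IOException;

import org.eclipse.jgit.lib.BatchRefUpdate;
import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.transport.ReceiveCommand;

class AtomicUpdateSketch {
    // Creates two branches in one transaction; with a reftable-backed
    // repository the whole batch lands in a single new table.
    static void createTwoBranches(Repository repo, ObjectId c1, ObjectId c2)
            throws IOException {
        BatchRefUpdate bru = repo.getRefDatabase().newBatchUpdate()
                .setAtomic(true)
                .setRefLogMessage("create topic branches", false);
        bru.addCommand(
                new ReceiveCommand(ObjectId.zeroId(), c1, "refs/heads/topic1"),
                new ReceiveCommand(ObjectId.zeroId(), c2, "refs/heads/topic2"));
        try (RevWalk rw = new RevWalk(repo)) {
            bru.execute(rw, NullProgressMonitor.INSTANCE);
        }
        // Every command now reports ReceiveCommand.Result.OK, or none of
        // them was applied; there is no partially-updated state.
    }
}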

FileReftableDatabase is based on FileReftableStack, which performs
compactions inline. This keeps the implementation simple and gives good
median performance, but every so often a write will rewrite the entire
ref database.
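
To make the inline compaction concrete, here is a hedged sketch modelled
on the new FileReftableStackTest; it assumes same-package access to the
internal FileReftableStack class, and the class name ReftableStackSketch
is invented:

package org.eclipse.jgit.internal.storage.file;

import java.io.File;
import java.io.IOException;

import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectIdRef;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.util.FileUtils;

class ReftableStackSketch {
    static void demo() throws IOException {
        File dir = FileUtils.createTempDir("rtstack", "", null);
        try (FileReftableStack stack = new FileReftableStack(
                new File(dir, "refs"), dir, null, () -> new Config())) {
            long next = stack.getMergedReftable().maxUpdateIndex() + 1;
            Ref ref = new ObjectIdRef.PeeledNonTag(Ref.Storage.PACKED,
                    "refs/heads/example", ObjectId.zeroId());
            // Appends one table; small tables are merged inline as part of
            // the same call whenever the segment sizes require it.
            boolean ok = stack.addReftable(w -> {
                w.setMinUpdateIndex(next).setMaxUpdateIndex(next).begin()
                        .writeRef(ref);
            });
            assert ok; // false: a concurrent writer won, reload and retry
            // GC can still squash the whole stack into a single file:
            stack.compactFully();
        } finally {
            FileUtils.delete(dir, FileUtils.RECURSIVE);
        }
    }
}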

For testing, a FileReftableTest (mirroring RefUpdateTest) is added to
check Reftable-specific behavior. This must be a separate test, as
reflogs have different semantics under reftable.
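
For example (a hypothetical sketch, not part of the change, assuming the
branch exists and has reflog entries), the reflog of a deleted branch
stays readable under reftable, which FileReftableTest asserts:

import java.io.IOException;

import org.eclipse.jgit.lib.ReflogEntry;
import org.eclipse.jgit.lib.ReflogReader;
import org.eclipse.jgit.lib.RefUpdate;
import org.eclipse.jgit.lib.Repository;

class ReflogSemanticsSketch {
    static ReflogEntry lastEntryAfterDelete(Repository repo, String branch)
            throws IOException {
        RefUpdate del = repo.updateRef(branch);
        del.setForceUpdate(true);
        del.delete();
        ReflogReader log = repo.getReflogReader(branch);
        // reftable: log is non-null and its last entry records the
        // deletion (old id -> zeroId()). With loose/packed storage the
        // log file is removed together with the ref.
        return log != null ? log.getLastEntry() : null;
    }
}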

Add a reftable flavor of BatchRefUpdateTest.

Add a FileReftableStackTest to exercise compaction.

Add FileRepository#convertToReftable so existing testdata can be
reused.
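
As a hedged sketch of how the conversion might be exercised (the test
class name ReftableConversionSketchTest is invented; the calls mirror
the new tests):

package org.eclipse.jgit.internal.storage.file;

import static org.junit.Assert.assertTrue;

import org.eclipse.jgit.junit.LocalDiskRepositoryTestCase;
import org.junit.Test;

public class ReftableConversionSketchTest extends LocalDiskRepositoryTestCase {
    @Test
    public void convertRoundTrip() throws Exception {
        FileRepository repo = createBareRepository();
        // Same flags the new tests pass; only the ref storage is rewritten.
        repo.convertToReftable(false, false);
        // "refs" becomes a stub file and $GIT_DIR/reftable holds the tables.
        assertTrue(FileReftableDatabase.isReftable(repo.getDirectory()));
        // The conversion is reversible:
        repo.convertToPackedRefs(false);
        assertTrue(repo.getRefDatabase() instanceof RefDirectory);
    }
}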

CQ: 21007
Change-Id: I1837f268e91c6b446cb0155061727dbaccb714b8
Signed-off-by: Han-Wen Nienhuys <hanwen@google.com>
Signed-off-by: Matthias Sohn <matthias.sohn@sap.com>

  137  org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/BatchRefUpdateTest.java
  203  org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/FileReftableStackTest.java
  553  org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/FileReftableTest.java
   58  org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/RefUpdateTest.java
    6  org.eclipse.jgit/resources/org/eclipse/jgit/internal/JGitText.properties
    6  org.eclipse.jgit/src/org/eclipse/jgit/internal/JGitText.java
  651  org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/FileReftableDatabase.java
  768  org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/FileReftableStack.java
  188  org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/FileRepository.java
   24  org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/GC.java
    2  org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/reftable/ReftableWriter.java
    6  org.eclipse.jgit/src/org/eclipse/jgit/lib/Constants.java

137  org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/BatchRefUpdateTest.java

@@ -61,6 +61,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeFalse;
import static org.junit.Assume.assumeTrue;
import java.io.File;
@@ -109,13 +110,18 @@ import org.junit.runners.Parameterized.Parameters;
@SuppressWarnings("boxing")
@RunWith(Parameterized.class)
public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Parameter
@Parameter(0)
public boolean atomic;
@Parameters(name = "atomic={0}")
@Parameter(1)
public boolean useReftable;
@Parameters(name = "atomic={0} reftable={1}")
public static Collection<Object[]> data() {
return Arrays
.asList(new Object[][] { { Boolean.FALSE }, { Boolean.TRUE } });
return Arrays.asList(new Object[][] { { Boolean.FALSE, Boolean.FALSE },
{ Boolean.TRUE, Boolean.FALSE },
{ Boolean.FALSE, Boolean.TRUE },
{ Boolean.TRUE, Boolean.TRUE }, });
}
private Repository diskRepo;
@@ -126,7 +132,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
private RevCommit A;
private RevCommit B;
private RevCommit B; // B descends from A.
/**
* When asserting the number of RefsChangedEvents you must account for one
@@ -148,11 +154,18 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
public void setUp() throws Exception {
super.setUp();
diskRepo = createBareRepository();
FileRepository fileRepo = createBareRepository();
if (useReftable) {
fileRepo.convertToReftable(false, false);
}
diskRepo = fileRepo;
setLogAllRefUpdates(true);
refdir = (RefDirectory) diskRepo.getRefDatabase();
refdir.setRetrySleepMs(Arrays.asList(0, 0));
if (!useReftable) {
refdir = (RefDirectory) diskRepo.getRefDatabase();
refdir.setRetrySleepMs(Arrays.asList(0, 0));
}
repo = new TestRepository<>(diskRepo);
A = repo.commit().create();
@@ -171,6 +184,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void packedRefsFileIsSorted() throws IOException {
assumeTrue(atomic);
assumeFalse(useReftable);
for (int i = 0; i < 2; i++) {
BatchRefUpdate bu = diskRepo.getRefDatabase().newBatchUpdate();
@@ -198,8 +212,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void simpleNoForce() throws IOException {
writeLooseRef("refs/heads/master", A);
writeLooseRef("refs/heads/masters", B);
writeLooseRefs("refs/heads/master", A, "refs/heads/masters", B);
List<ReceiveCommand> cmds = Arrays.asList(
new ReceiveCommand(A, B, "refs/heads/master", UPDATE),
@@ -220,8 +233,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void simpleForce() throws IOException {
writeLooseRef("refs/heads/master", A);
writeLooseRef("refs/heads/masters", B);
writeLooseRefs("refs/heads/master", A, "refs/heads/masters", B);
List<ReceiveCommand> cmds = Arrays.asList(
new ReceiveCommand(A, B, "refs/heads/master", UPDATE),
@@ -231,7 +243,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
assertResults(cmds, OK, OK);
assertRefs("refs/heads/master", B, "refs/heads/masters", A);
assertEquals(atomic ? 2 : 3, refsChangedEvents);
assertEquals(batchesRefUpdates() ? 2 : 3, refsChangedEvents);
}
@Test
@@ -258,8 +270,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void fileDirectoryConflict() throws IOException {
writeLooseRef("refs/heads/master", A);
writeLooseRef("refs/heads/masters", B);
writeLooseRefs("refs/heads/master", A, "refs/heads/masters", B);
List<ReceiveCommand> cmds = Arrays.asList(
new ReceiveCommand(A, B, "refs/heads/master", UPDATE),
@@ -269,16 +280,14 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
if (atomic) {
// Atomic update sees that master and master/x are conflicting, then
// marks
// the first one in the list as LOCK_FAILURE and aborts the rest.
// marks the first one in the list as LOCK_FAILURE and aborts the rest.
assertResults(cmds, LOCK_FAILURE, TRANSACTION_ABORTED,
TRANSACTION_ABORTED);
assertRefs("refs/heads/master", A, "refs/heads/masters", B);
assertEquals(1, refsChangedEvents);
} else {
// Non-atomic updates are applied in order: master succeeds, then
// master/x
// fails due to conflict.
// master/x fails due to conflict.
assertResults(cmds, OK, LOCK_FAILURE, LOCK_FAILURE);
assertRefs("refs/heads/master", B, "refs/heads/masters", B);
assertEquals(2, refsChangedEvents);
@@ -287,8 +296,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void conflictThanksToDelete() throws IOException {
writeLooseRef("refs/heads/master", A);
writeLooseRef("refs/heads/masters", B);
writeLooseRefs("refs/heads/master", A, "refs/heads/masters", B);
List<ReceiveCommand> cmds = Arrays.asList(
new ReceiveCommand(A, B, "refs/heads/master", UPDATE),
@@ -300,7 +308,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
assertRefs("refs/heads/master", B, "refs/heads/masters/x", A);
if (atomic) {
assertEquals(2, refsChangedEvents);
} else {
} else if (!useReftable) {
// The non-atomic case actually produces 5 events, but that's an
// implementation detail. We expect at least 4 events, one for the
// initial read due to writeLooseRef(), and then one for each
@@ -427,7 +435,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
assertResults(cmds, OK, OK);
assertRefs("refs/heads/master", B, "refs/heads/branch", B);
assertEquals(atomic ? 2 : 3, refsChangedEvents);
assertEquals(batchesRefUpdates() ? 2 : 3, refsChangedEvents);
assertReflogUnchanged(oldLogs, "refs/heads/master");
assertReflogUnchanged(oldLogs, "refs/heads/branch");
}
@@ -448,7 +456,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
assertResults(cmds, OK, OK);
assertRefs("refs/heads/master", B, "refs/heads/branch1", B,
"refs/heads/branch2", A);
assertEquals(atomic ? 3 : 4, refsChangedEvents);
assertEquals(batchesRefUpdates() ? 3 : 4, refsChangedEvents);
assertReflogEquals(reflog(A, B, new PersonIdent(diskRepo), "a reflog"),
getLastReflog("refs/heads/master"));
assertReflogEquals(
@@ -473,7 +481,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
assertResults(cmds, OK, OK, OK);
assertRefs("refs/heads/master", B, "refs/heads/branch1", A,
"refs/heads/branch2", A);
assertEquals(atomic ? 3 : 5, refsChangedEvents);
assertEquals(batchesRefUpdates() ? 3 : 5, refsChangedEvents);
assertReflogEquals(
// Always forced; setAllowNonFastForwards(true) bypasses the
// check.
@@ -514,7 +522,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
assertResults(cmds, OK, OK);
assertRefs("refs/heads/master", B, "refs/heads/branch", A);
assertEquals(atomic ? 2 : 3, refsChangedEvents);
assertEquals(batchesRefUpdates() ? 2 : 3, refsChangedEvents);
assertReflogEquals(
reflog(A, B, new PersonIdent(diskRepo),
"a reflog: fast-forward"),
@@ -538,7 +546,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
.setRefLogIdent(ident));
assertResults(cmds, OK, OK);
assertEquals(atomic ? 2 : 3, refsChangedEvents);
assertEquals(batchesRefUpdates() ? 2 : 3, refsChangedEvents);
assertRefs("refs/heads/master", B, "refs/heads/branch", B);
assertReflogEquals(reflog(A, B, ident, "a reflog"),
getLastReflog("refs/heads/master"), true);
@@ -560,8 +568,15 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
assertResults(cmds, OK, OK);
assertRefs("refs/heads/branch", B);
assertEquals(atomic ? 3 : 4, refsChangedEvents);
assertNull(getLastReflog("refs/heads/master"));
assertEquals(batchesRefUpdates() ? 3 : 4, refsChangedEvents);
if (useReftable) {
// reftable retains reflog entries for deleted branches.
assertReflogEquals(
reflog(A, zeroId(), new PersonIdent(diskRepo), "a reflog"),
getLastReflog("refs/heads/master"));
} else {
assertNull(getLastReflog("refs/heads/master"));
}
assertReflogEquals(reflog(A, B, new PersonIdent(diskRepo), "a reflog"),
getLastReflog("refs/heads/branch"));
}
@@ -577,8 +592,11 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
assertResults(cmds, OK, OK);
assertRefs("refs/heads/master/x", A);
assertEquals(atomic ? 2 : 3, refsChangedEvents);
assertNull(getLastReflog("refs/heads/master"));
assertEquals(batchesRefUpdates() ? 2 : 3, refsChangedEvents);
if (!useReftable) {
// reftable retains reflog entries for deleted branches.
assertNull(getLastReflog("refs/heads/master"));
}
assertReflogEquals(
reflog(zeroId(), A, new PersonIdent(diskRepo), "a reflog"),
getLastReflog("refs/heads/master/x"));
@@ -624,7 +642,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
.setRefLogMessage("a reflog", true));
assertResults(cmds, OK, OK);
assertEquals(atomic ? 2 : 3, refsChangedEvents);
assertEquals(batchesRefUpdates() ? 2 : 3, refsChangedEvents);
assertReflogEquals(reflog(A, B, ident, "custom log"),
getLastReflog("refs/heads/master"), true);
assertReflogEquals(reflog(zeroId(), B, ident, "a reflog: created"),
@@ -645,7 +663,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
execute(newBatchUpdate(cmds).setRefLogMessage("a reflog", true));
assertResults(cmds, OK, OK);
assertEquals(atomic ? 2 : 3, refsChangedEvents);
assertEquals(batchesRefUpdates() ? 2 : 3, refsChangedEvents);
assertReflogUnchanged(oldLogs, "refs/heads/master");
assertReflogEquals(
reflog(zeroId(), B, new PersonIdent(diskRepo),
@@ -655,6 +673,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void refLogNotWrittenWithoutConfigOption() throws Exception {
assumeFalse(useReftable);
setLogAllRefUpdates(false);
writeRef("refs/heads/master", A);
@@ -674,6 +693,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void forceRefLogInUpdate() throws Exception {
assumeFalse(useReftable);
setLogAllRefUpdates(false);
writeRef("refs/heads/master", A);
assertTrue(getLastReflogs("refs/heads/master", "refs/heads/branch")
@@ -695,6 +715,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void forceRefLogInCommand() throws Exception {
assumeFalse(useReftable);
setLogAllRefUpdates(false);
writeRef("refs/heads/master", A);
@@ -717,6 +738,8 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void packedRefsLockFailure() throws Exception {
assumeFalse(useReftable);
writeLooseRef("refs/heads/master", A);
List<ReceiveCommand> cmds = Arrays.asList(
@@ -748,6 +771,8 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void oneRefLockFailure() throws Exception {
assumeFalse(useReftable);
writeLooseRef("refs/heads/master", A);
List<ReceiveCommand> cmds = Arrays.asList(
@@ -778,6 +803,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void singleRefUpdateDoesNotRequirePackedRefsLock() throws Exception {
assumeFalse(useReftable);
writeLooseRef("refs/heads/master", A);
List<ReceiveCommand> cmds = Arrays
@@ -799,6 +825,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Test
public void atomicUpdateRespectsInProcessLock() throws Exception {
assumeTrue(atomic);
assumeFalse(useReftable);
writeLooseRef("refs/heads/master", A);
@@ -857,7 +884,38 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
}
private void writeLooseRef(String name, AnyObjectId id) throws IOException {
write(new File(diskRepo.getDirectory(), name), id.name() + "\n");
if (useReftable) {
writeRef(name, id);
} else {
write(new File(diskRepo.getDirectory(), name), id.name() + "\n");
}
}
private void writeLooseRefs(String name1, AnyObjectId id1, String name2,
AnyObjectId id2) throws IOException {
if (useReftable) {
BatchRefUpdate bru = diskRepo.getRefDatabase().newBatchUpdate();
Ref r1 = diskRepo.exactRef(name1);
ReceiveCommand c1 = new ReceiveCommand(
r1 != null ? r1.getObjectId() : ObjectId.zeroId(),
id1.toObjectId(), name1, r1 == null ? CREATE : UPDATE);
Ref r2 = diskRepo.exactRef(name2);
ReceiveCommand c2 = new ReceiveCommand(
r2 != null ? r2.getObjectId() : ObjectId.zeroId(),
id2.toObjectId(), name2, r2 == null ? CREATE : UPDATE);
bru.addCommand(c1, c2);
try (RevWalk rw = new RevWalk(diskRepo)) {
bru.execute(rw, NullProgressMonitor.INSTANCE);
}
assertEquals(c2.getResult(), ReceiveCommand.Result.OK);
assertEquals(c1.getResult(), ReceiveCommand.Result.OK);
} else {
writeLooseRef(name1, id1);
writeLooseRef(name2, id2);
}
}
private void writeRef(String name, AnyObjectId id) throws IOException {
@@ -876,7 +934,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
}
private BatchRefUpdate newBatchUpdate(List<ReceiveCommand> cmds) {
BatchRefUpdate u = refdir.newBatchUpdate();
BatchRefUpdate u = diskRepo.getRefDatabase().newBatchUpdate();
if (atomic) {
assertTrue(u.isAtomic());
} else {
@@ -909,7 +967,8 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
expected.put((String) args[i], (AnyObjectId) args[i + 1]);
}
Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
Map<String, Ref> refs = diskRepo.getRefDatabase()
.getRefs(RefDatabase.ALL);
Ref actualHead = refs.remove(Constants.HEAD);
if (actualHead != null) {
String actualLeafName = actualHead.getLeaf().getName();
@@ -958,7 +1017,7 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
ReceiveCommand c = cmds.get(i);
Result r = expected[i];
assertTrue(String.format(
"result of command (%d) should be %s: %s %s%s",
"result of command (%d) should be %s, got %s %s%s",
Integer.valueOf(i), r, c, c.getResult(),
c.getMessage() != null ? " (" + c.getMessage() + ")" : ""),
r.p.test(c));
@@ -1048,4 +1107,8 @@ public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
}
};
}
private boolean batchesRefUpdates() {
return atomic || useReftable;
}
}

203  org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/FileReftableStackTest.java

@@ -0,0 +1,203 @@
/*
* Copyright (C) 2019 Google LLC
* and other copyright owners as documented in the project's IP log.
*
* This program and the accompanying materials are made available
* under the terms of the Eclipse Distribution License v1.0 which
* accompanies this distribution, is reproduced below, and is
* available at http://www.eclipse.org/org/documents/edl-v10.php
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* - Neither the name of the Eclipse Foundation, Inc. nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.eclipse.jgit.internal.storage.file;
import static org.eclipse.jgit.lib.Ref.Storage.PACKED;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import org.eclipse.jgit.internal.storage.file.FileReftableStack.Segment;
import org.eclipse.jgit.internal.storage.reftable.MergedReftable;
import org.eclipse.jgit.internal.storage.reftable.RefCursor;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectIdRef;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.util.FileUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
public class FileReftableStackTest {
private static Ref newRef(String name, ObjectId id) {
return new ObjectIdRef.PeeledNonTag(PACKED, name, id);
}
private File reftableDir;
@Before
public void setup() throws Exception {
reftableDir = FileUtils.createTempDir("rtstack", "", null);
}
@After
public void tearDown() throws Exception {
if (reftableDir != null) {
FileUtils.delete(reftableDir, FileUtils.RECURSIVE);
}
}
void writeBranches(FileReftableStack stack, String template, int start,
int N) throws IOException {
for (int i = 0; i < N; i++) {
while (true) {
final long next = stack.getMergedReftable().maxUpdateIndex()
+ 1;
String name = String.format(template,
Integer.valueOf(start + i));
Ref r = newRef(name, ObjectId.zeroId());
boolean ok = stack.addReftable(rw -> {
rw.setMinUpdateIndex(next).setMaxUpdateIndex(next).begin()
.writeRef(r);
});
if (ok) {
break;
}
}
}
}
public void testCompaction(int N) throws Exception {
try (FileReftableStack stack = new FileReftableStack(
new File(reftableDir, "refs"), reftableDir, null,
() -> new Config())) {
writeBranches(stack, "refs/heads/branch%d", 0, N);
MergedReftable table = stack.getMergedReftable();
for (int i = 1; i < N; i++) {
String name = String.format("refs/heads/branch%d",
Integer.valueOf(i));
RefCursor c = table.seekRef(name);
assertTrue(c.next());
assertEquals(ObjectId.zeroId(), c.getRef().getObjectId());
}
List<String> files = Arrays.asList(reftableDir.listFiles()).stream()
.map(File::getName).collect(Collectors.toList());
Collections.sort(files);
assertTrue(files.size() < 20);
FileReftableStack.CompactionStats stats = stack.getStats();
assertEquals(0, stats.failed);
assertTrue(stats.attempted < N);
assertTrue(stats.refCount < FileReftableStack.log(N) * N);
}
}
@Test
public void testCompaction9() throws Exception {
testCompaction(9);
}
@Test
public void testCompaction1024() throws Exception {
testCompaction(1024);
}
@Rule
public final ExpectedException thrown = ExpectedException.none();
@SuppressWarnings({ "resource", "unused" })
@Test
public void missingReftable() throws Exception {
try (FileReftableStack stack = new FileReftableStack(
new File(reftableDir, "refs"), reftableDir, null,
() -> new Config())) {
outer: for (int i = 0; i < 10; i++) {
final long next = stack.getMergedReftable().maxUpdateIndex()
+ 1;
String name = String.format("branch%d", Integer.valueOf(i));
Ref r = newRef(name, ObjectId.zeroId());
boolean ok = stack.addReftable(rw -> {
rw.setMinUpdateIndex(next).setMaxUpdateIndex(next).begin()
.writeRef(r);
});
assertTrue(ok);
List<File> files = Arrays.asList(reftableDir.listFiles());
for (int j = 0; j < files.size(); j++) {
File f = files.get(j);
if (f.getName().endsWith(".ref")) {
assertTrue(f.delete());
break outer;
}
}
}
}
thrown.expect(FileNotFoundException.class);
new FileReftableStack(new File(reftableDir, "refs"), reftableDir, null,
() -> new Config());
}
@Test
public void testSegments() {
long in[] = { 1024, 1024, 1536, 100, 64, 50, 25, 24 };
List<Segment> got = FileReftableStack.segmentSizes(in);
Segment want[] = { new Segment(0, 3, 10, 3584),
new Segment(3, 5, 6, 164), new Segment(5, 6, 5, 50),
new Segment(6, 8, 4, 49), };
assertEquals(got.size(), want.length);
for (int i = 0; i < want.length; i++) {
assertTrue(want[i].equals(got.get(i)));
}
}
@Test
public void testLog2() throws Exception {
assertEquals(10, FileReftableStack.log(1024));
assertEquals(10, FileReftableStack.log(1025));
assertEquals(10, FileReftableStack.log(2047));
}
}

553  org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/FileReftableTest.java

@@ -0,0 +1,553 @@
/*
* Copyright (C) 2019, Google Inc.
* and other copyright owners as documented in the project's IP log.
*
* This program and the accompanying materials are made available
* under the terms of the Eclipse Distribution License v1.0 which
* accompanies this distribution, is reproduced below, and is
* available at http://www.eclipse.org/org/documents/edl-v10.php
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* - Neither the name of the Eclipse Foundation, Inc. nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.eclipse.jgit.internal.storage.file;
import static org.eclipse.jgit.lib.RefUpdate.Result.FAST_FORWARD;
import static org.eclipse.jgit.lib.RefUpdate.Result.FORCED;
import static org.eclipse.jgit.lib.RefUpdate.Result.IO_FAILURE;
import static org.eclipse.jgit.lib.RefUpdate.Result.LOCK_FAILURE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.PersonIdent;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.RefRename;
import org.eclipse.jgit.lib.RefUpdate;
import org.eclipse.jgit.lib.RefUpdate.Result;
import org.eclipse.jgit.lib.ReflogEntry;
import org.eclipse.jgit.lib.ReflogReader;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.test.resources.SampleDataRepositoryTestCase;
import org.eclipse.jgit.transport.ReceiveCommand;
import org.junit.Test;
public class FileReftableTest extends SampleDataRepositoryTestCase {
String bCommit;
@Override
public void setUp() throws Exception {
super.setUp();
Ref b = db.exactRef("refs/heads/b");
bCommit = b.getObjectId().getName();
db.convertToReftable(false, false);
}
@SuppressWarnings("boxing")
@Test
public void testRacyReload() throws Exception {
ObjectId id = db.resolve("master");
int retry = 0;
try (FileRepository repo1 = new FileRepository(db.getDirectory());
FileRepository repo2 = new FileRepository(db.getDirectory())) {
FileRepository repos[] = { repo1, repo2 };
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 2; j++) {
FileRepository repo = repos[j];
RefUpdate u = repo.getRefDatabase().newUpdate(
String.format("branch%d", i * 10 + j), false);
u.setNewObjectId(id);
RefUpdate.Result r = u.update();
if (!r.equals(Result.NEW)) {
retry++;
u = repo.getRefDatabase().newUpdate(
String.format("branch%d", i * 10 + j), false);
u.setNewObjectId(id);
r = u.update();
assertEquals(r, Result.NEW);
}
}
}
// only the first one succeeds
assertEquals(retry, 19);
}
}
@Test
public void testCompactFully() throws Exception {
ObjectId c1 = db.resolve("master^^");
ObjectId c2 = db.resolve("master^");
for (int i = 0; i < 5; i++) {
RefUpdate u = db.updateRef("refs/heads/master");
u.setForceUpdate(true);
u.setNewObjectId((i%2) == 0 ? c1 : c2);
assertEquals(u.update(), FORCED);
}
File tableDir = new File(db.getDirectory(), Constants.REFTABLE);
assertTrue(tableDir.listFiles().length > 1);
((FileReftableDatabase)db.getRefDatabase()).compactFully();
assertEquals(tableDir.listFiles().length,1);
}
@Test
public void testConvert() throws Exception {
Ref h = db.exactRef("HEAD");
assertTrue(h.isSymbolic());
assertEquals("refs/heads/master", h.getTarget().getName());
Ref b = db.exactRef("refs/heads/b");
assertFalse(b.isSymbolic());
assertTrue(b.isPeeled());
assertEquals(bCommit, b.getObjectId().name());
}
@Test
public void testConvertToRefdir() throws Exception {
db.convertToPackedRefs(false);
assertTrue(db.getRefDatabase() instanceof RefDirectory);
Ref h = db.exactRef("HEAD");
assertTrue(h.isSymbolic());
assertEquals("refs/heads/master", h.getTarget().getName());
Ref b = db.exactRef("refs/heads/b");
assertFalse(b.isSymbolic());
assertTrue(b.isPeeled());
assertEquals(bCommit, b.getObjectId().name());
}
@Test
public void testBatchrefUpdate() throws Exception {
ObjectId cur = db.resolve("master");
ObjectId prev = db.resolve("master^");
PersonIdent person = new PersonIdent("name", "mail@example.com");
ReceiveCommand rc1 = new ReceiveCommand(ObjectId.zeroId(), cur, "refs/heads/batch1");
ReceiveCommand rc2 = new ReceiveCommand(ObjectId.zeroId(), prev, "refs/heads/batch2");
String msg = "message";
try (RevWalk rw = new RevWalk(db)) {
db.getRefDatabase().newBatchUpdate()
.addCommand(rc1, rc2)
.setAtomic(true)
.setRefLogIdent(person)
.setRefLogMessage(msg, false)
.execute(rw, NullProgressMonitor.INSTANCE);
}
assertEquals(rc1.getResult(), ReceiveCommand.Result.OK);
assertEquals(rc2.getResult(), ReceiveCommand.Result.OK);
ReflogEntry e = db.getReflogReader("refs/heads/batch1").getLastEntry();
assertEquals(msg, e.getComment());
assertEquals(person, e.getWho());
assertEquals(cur, e.getNewId());
e = db.getReflogReader("refs/heads/batch2").getLastEntry();
assertEquals(msg, e.getComment());
assertEquals(person, e.getWho());
assertEquals(prev, e.getNewId());
assertEquals(cur, db.exactRef("refs/heads/batch1").getObjectId());
assertEquals(prev, db.exactRef("refs/heads/batch2").getObjectId());
}
@Test
public void testFastforwardStatus() throws Exception {
ObjectId cur = db.resolve("master");
ObjectId prev = db.resolve("master^");
RefUpdate u = db.updateRef("refs/heads/master");
u.setNewObjectId(prev);
u.setForceUpdate(true);
assertEquals(FORCED, u.update());
RefUpdate u2 = db.updateRef("refs/heads/master");
u2.setNewObjectId(cur);
assertEquals(FAST_FORWARD, u2.update());
}
@Test
public void testUpdateChecksOldValue() throws Exception {
ObjectId cur = db.resolve("master");
ObjectId prev = db.resolve("master^");
RefUpdate u1 = db.updateRef("refs/heads/master");
RefUpdate u2 = db.updateRef("refs/heads/master");
u1.setExpectedOldObjectId(cur);
u1.setNewObjectId(prev);
u1.setForceUpdate(true);
u2.setExpectedOldObjectId(cur);
u2.setNewObjectId(prev);
u2.setForceUpdate(true);
assertEquals(FORCED, u1.update());
assertEquals(LOCK_FAILURE, u2.update());
}
@Test
public void testWritesymref() throws Exception {
writeSymref(Constants.HEAD, "refs/heads/a");
assertNotNull(db.exactRef("refs/heads/b"));
}
@Test
public void testFastforwardStatus2() throws Exception {
writeSymref(Constants.HEAD, "refs/heads/a");
ObjectId bId = db.exactRef("refs/heads/b").getObjectId();
RefUpdate u = db.updateRef("refs/heads/a");
u.setNewObjectId(bId);
u.setRefLogMessage("Setup", false);
assertEquals(FAST_FORWARD, u.update());
}
@Test
public void testDelete() throws Exception {
RefUpdate up = db.getRefDatabase().newUpdate("refs/heads/a", false);
up.setForceUpdate(true);
RefUpdate.Result res = up.delete();
assertEquals(res, FORCED);
assertNull(db.exactRef("refs/heads/a"));
}
@Test
public void testDeleteWithoutHead() throws IOException {
// Prepare repository without HEAD
RefUpdate refUpdate = db.updateRef(Constants.HEAD, true);
refUpdate.setForceUpdate(true);
refUpdate.setNewObjectId(ObjectId.zeroId());
RefUpdate.Result updateResult = refUpdate.update();
assertEquals(FORCED, updateResult);
Ref r = db.exactRef("HEAD");
assertEquals(ObjectId.zeroId(), r.getObjectId());
RefUpdate.Result deleteHeadResult = db.updateRef(Constants.HEAD)
.delete();
// why does doDelete say NEW ?
assertEquals(RefUpdate.Result.NO_CHANGE, deleteHeadResult);
// Any result is ok as long as it's not an NPE
db.updateRef(Constants.R_HEADS + "master").delete();
}
@Test
public void testUpdateRefDetached() throws Exception {
ObjectId pid = db.resolve("refs/heads/master");
ObjectId ppid = db.resolve("refs/heads/master^");
RefUpdate updateRef = db.updateRef("HEAD", true);
updateRef.setForceUpdate(true);
updateRef.setNewObjectId(ppid);
RefUpdate.Result update = updateRef.update();
assertEquals(FORCED, update);
assertEquals(ppid, db.resolve("HEAD"));
Ref ref = db.exactRef("HEAD");
assertEquals("HEAD", ref.getName());
assertTrue("is detached", !ref.isSymbolic());
// the branch HEAD referred to is left untouched
assertEquals(pid, db.resolve("refs/heads/master"));
ReflogReader reflogReader = db.getReflogReader("HEAD");
ReflogEntry e = reflogReader.getReverseEntries().get(0);
assertEquals(ppid, e.getNewId());
assertEquals("GIT_COMMITTER_EMAIL", e.getWho().getEmailAddress());
assertEquals("GIT_COMMITTER_NAME", e.getWho().getName());
assertEquals(1250379778000L, e.getWho().getWhen().getTime());
assertEquals(pid, e.getOldId());
}
@Test
public void testWriteReflog() throws Exception {
ObjectId pid = db.resolve("refs/heads/master^");
RefUpdate updateRef = db.updateRef("refs/heads/master");
updateRef.setNewObjectId(pid);
String msg = "REFLOG!";
updateRef.setRefLogMessage(msg, true);
PersonIdent person = new PersonIdent("name", "mail@example.com");
updateRef.setRefLogIdent(person);
updateRef.setForceUpdate(true);
RefUpdate.Result update = updateRef.update();
assertEquals(FORCED, update); // internal
ReflogReader r = db.getReflogReader("refs/heads/master");
ReflogEntry e = r.getLastEntry();
assertEquals(e.getNewId(), pid);
assertEquals(e.getComment(), "REFLOG!: FORCED");
assertEquals(e.getWho(), person);
}
@Test
public void testLooseDelete() throws IOException {
final String newRef = "refs/heads/abc";
assertNull(db.exactRef(newRef));
RefUpdate ref = db.updateRef(newRef);
ObjectId nonZero = db.resolve(Constants.HEAD);
assertNotEquals(nonZero, ObjectId.zeroId());
ref.setNewObjectId(nonZero);
assertEquals(RefUpdate.Result.NEW, ref.update());
ref = db.updateRef(newRef);
ref.setNewObjectId(db.resolve(Constants.HEAD));
assertEquals(ref.delete(), RefUpdate.Result.NO_CHANGE);
// Differs from RefupdateTest. Deleting a loose ref leaves reflog trail.
ReflogReader reader = db.getReflogReader("refs/heads/abc");
assertEquals(ObjectId.zeroId(), reader.getReverseEntry(1).getOldId());
assertEquals(nonZero, reader.getReverseEntry(1).getNewId());
assertEquals(nonZero, reader.getReverseEntry(0).getOldId());
assertEquals(ObjectId.zeroId(), reader.getReverseEntry(0).getNewId());
}
private static class SubclassedId extends ObjectId {
SubclassedId(AnyObjectId src) {
super(src);
}
}
@Test
public void testNoCacheObjectIdSubclass() throws IOException {
final String newRef = "refs/heads/abc";
final RefUpdate ru = updateRef(newRef);
final SubclassedId newid = new SubclassedId(ru.getNewObjectId());
ru.setNewObjectId(newid);
RefUpdate.Result update = ru.update();
assertEquals(RefUpdate.Result.NEW, update);
Ref r = db.exactRef(newRef);
assertEquals(newRef, r.getName());
assertNotNull(r.getObjectId());
assertNotSame(newid, r.getObjectId());
assertSame(ObjectId.class, r.getObjectId().getClass());
assertEquals(newid, r.getObjectId());
List<ReflogEntry> reverseEntries1 = db.getReflogReader("refs/heads/abc")
.getReverseEntries();
ReflogEntry entry1 = reverseEntries1.get(0);
assertEquals(1, reverseEntries1.size());
assertEquals(ObjectId.zeroId(), entry1.getOldId());
assertEquals(r.getObjectId(), entry1.getNewId());
assertEquals(new PersonIdent(db).toString(),
entry1.getWho().toString());
assertEquals("", entry1.getComment());
List<ReflogEntry> reverseEntries2 = db.getReflogReader("HEAD")
.getReverseEntries();
assertEquals(0, reverseEntries2.size());
}
@Test
public void testDeleteSymref() throws IOException {
RefUpdate dst = updateRef("refs/heads/abc");
assertEquals(RefUpdate.Result.NEW, dst.update());
ObjectId id = dst.getNewObjectId();
RefUpdate u = db.updateRef("refs/symref");
assertEquals(RefUpdate.Result.NEW, u.link(dst.getName()));
Ref ref = db.exactRef(u.getName());
assertNotNull(ref);
assertTrue(ref.isSymbolic());
assertEquals(dst.getName(), ref.getLeaf().getName());
assertEquals(id, ref.getLeaf().getObjectId());
u = db.updateRef(u.getName());
u.setDetachingSymbolicRef();
u.setForceUpdate(true);
assertEquals(FORCED, u.delete());
assertNull(db.exactRef(u.getName()));
ref = db.exactRef(dst.getName());
assertNotNull(ref);
assertFalse(ref.isSymbolic());
assertEquals(id, ref.getObjectId());
}
@Test
public void writeUnbornHead() throws Exception {
RefUpdate.Result r = db.updateRef("HEAD").link("refs/heads/unborn");
assertEquals(FORCED, r);
Ref head = db.exactRef("HEAD");
assertTrue(head.isSymbolic());
assertEquals(head.getTarget().getName(), "refs/heads/unborn");
}
/**
* Update the HEAD ref when the referenced branch is unborn
*
* @throws Exception
*/
@Test
public void testUpdateRefDetachedUnbornHead() throws Exception {
ObjectId ppid = db.resolve("refs/heads/master^");
writeSymref("HEAD", "refs/heads/unborn");
RefUpdate updateRef = db.updateRef("HEAD", true);
updateRef.setForceUpdate(true);
updateRef.setNewObjectId(ppid);
RefUpdate.Result update = updateRef.update();
assertEquals(RefUpdate.Result.NEW, update);
assertEquals(ppid, db.resolve("HEAD"));
Ref ref = db.exactRef("HEAD");
assertEquals("HEAD", ref.getName());
assertTrue("is detached", !ref.isSymbolic());
// the branch HEAD referred to is left untouched
assertNull(db.resolve("refs/heads/unborn"));
ReflogReader reflogReader = db.getReflogReader("HEAD");
ReflogEntry e = reflogReader.getReverseEntries().get(0);
assertEquals(ObjectId.zeroId(), e.getOldId());
assertEquals(ppid, e.getNewId());
assertEquals("GIT_COMMITTER_EMAIL", e.getWho().getEmailAddress());
assertEquals("GIT_COMMITTER_NAME", e.getWho().getName());
assertEquals(1250379778000L, e.getWho().getWhen().getTime());
}
@Test
public void testDeleteNotFound() throws IOException {
RefUpdate ref = updateRef("refs/heads/doesnotexist");
assertNull(db.exactRef(ref.getName()));
assertEquals(RefUpdate.Result.NEW, ref.delete());
assertNull(db.exactRef(ref.getName()));
}
@Test
public void testRenameSymref() throws IOException {
db.resolve("HEAD");
RefRename r = db.renameRef("HEAD", "KOPF");
assertEquals(IO_FAILURE, r.rename());
}
@Test
public void testRenameCurrentBranch() throws IOException {
ObjectId rb = db.resolve("refs/heads/b");
writeSymref(Constants.HEAD, "refs/heads/b");
ObjectId oldHead = db.resolve(Constants.HEAD);
assertEquals("internal test condition, b == HEAD", oldHead, rb);
RefRename renameRef = db.renameRef("refs/heads/b",
"refs/heads/new/name");
RefUpdate.Result result = renameRef.rename();
assertEquals(RefUpdate.Result.RENAMED, result);
assertEquals(rb, db.resolve("refs/heads/new/name"));
assertNull(db.resolve("refs/heads/b"));
assertEquals(rb, db.resolve(Constants.HEAD));
List<String> names = new ArrayList<>();
names.add("HEAD");
names.add("refs/heads/b");
names.add("refs/heads/new/name");
for (String nm : names) {
ReflogReader rd = db.getReflogReader(nm);
assertNotNull(rd);
ReflogEntry last = rd.getLastEntry();
ObjectId id = last.getNewId();
assertTrue(ObjectId.zeroId().equals(id) || rb.equals(id));
id = last.getNewId();
assertTrue(ObjectId.zeroId().equals(id) || rb.equals(id));
String want = "Branch: renamed b to new/name";
assertEquals(want, last.getComment());
}
}
@Test
public void testRenameDestExists() throws IOException {
ObjectId rb = db.resolve("refs/heads/b");
writeSymref(Constants.HEAD, "refs/heads/b");
ObjectId oldHead = db.resolve(Constants.HEAD);
assertEquals("internal test condition, b == HEAD", oldHead, rb);
RefRename renameRef = db.renameRef("refs/heads/b", "refs/heads/a");
RefUpdate.Result result = renameRef.rename();
assertEquals(RefUpdate.Result.LOCK_FAILURE, result);
}
@Test
public void testRenameAtomic() throws IOException {
ObjectId prevId = db.resolve("refs/heads/master^");
RefRename rename = db.renameRef("refs/heads/master",
"refs/heads/newmaster");
RefUpdate updateRef = db.updateRef("refs/heads/master");
updateRef.setNewObjectId(prevId);
updateRef.setForceUpdate(true);
assertEquals(FORCED, updateRef.update());
assertEquals(RefUpdate.Result.LOCK_FAILURE, rename.rename());
}
@Test
public void reftableRefsStorageClass() throws IOException {
Ref b = db.exactRef("refs/heads/b");
assertEquals(Ref.Storage.PACKED, b.getStorage());
}
private RefUpdate updateRef(String name) throws IOException {
final RefUpdate ref = db.updateRef(name);
ref.setNewObjectId(db.resolve(Constants.HEAD));
return ref;
}
private void writeSymref(String src, String dst) throws IOException {
RefUpdate u = db.updateRef(src);
switch (u.link(dst)) {
case NEW:
case FORCED:
case NO_CHANGE:
break;
default:
fail("link " + src + " to " + dst);
}
}
}

58  org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/RefUpdateTest.java

@@ -45,9 +45,12 @@
package org.eclipse.jgit.internal.storage.file;
import static org.eclipse.jgit.junit.Assert.assertEquals;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.eclipse.jgit.junit.Assert.assertEquals;
import static org.eclipse.jgit.lib.Constants.LOCK_SUFFIX;
import static org.eclipse.jgit.lib.RefUpdate.Result.FORCED;
import static org.eclipse.jgit.lib.RefUpdate.Result.IO_FAILURE;
import static org.eclipse.jgit.lib.RefUpdate.Result.LOCK_FAILURE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
@@ -82,7 +85,6 @@ import org.eclipse.jgit.test.resources.SampleDataRepositoryTestCase;
import org.junit.Test;
public class RefUpdateTest extends SampleDataRepositoryTestCase {
private void writeSymref(String src, String dst) throws IOException {
RefUpdate u = db.updateRef(src);
switch (u.link(dst)) {
@@ -232,6 +234,17 @@ public class RefUpdateTest extends SampleDataRepositoryTestCase {
assertEquals(0,db.getReflogReader("HEAD").getReverseEntries().size());
}
@Test
public void testWriteReflog() throws IOException {
ObjectId pid = db.resolve("refs/heads/master^");
RefUpdate updateRef = db.updateRef("refs/heads/master");
updateRef.setNewObjectId(pid);
updateRef.setForceUpdate(true);
Result update = updateRef.update();
assertEquals(Result.FORCED, update);
assertEquals(1,db.getReflogReader("refs/heads/master").getReverseEntries().size());
}
@Test
public void testLooseDelete() throws IOException {
final String newRef = "refs/heads/abc";
@@ -379,6 +392,8 @@ public class RefUpdateTest extends SampleDataRepositoryTestCase {
refUpdate.setNewObjectId(ObjectId.zeroId());
Result updateResult = refUpdate.update();
assertEquals(Result.FORCED, updateResult);
assertEquals(ObjectId.zeroId(), db.exactRef("HEAD").getObjectId());
Result deleteHeadResult = db.updateRef(Constants.HEAD).delete();
assertEquals(Result.NO_CHANGE, deleteHeadResult);
@@ -902,6 +917,45 @@ public class RefUpdateTest extends SampleDataRepositoryTestCase {
"refs/heads/new/name", "refs/heads/a");
}
@Test
public void testUpdateChecksOldValue() throws Exception {
ObjectId cur = db.resolve("master");
ObjectId prev = db.resolve("master^");
RefUpdate u1 = db.updateRef("refs/heads/master");
RefUpdate u2 = db.updateRef("refs/heads/master");
u1.setExpectedOldObjectId(cur);
u1.setNewObjectId(prev);
u1.setForceUpdate(true);
u2.setExpectedOldObjectId(cur);
u2.setNewObjectId(prev);
u2.setForceUpdate(true);
assertEquals(FORCED, u1.update());
assertEquals(LOCK_FAILURE, u2.update());
}
@Test
public void testRenameAtomic() throws IOException {
ObjectId prevId = db.resolve("refs/heads/master^");
RefRename rename = db.renameRef("refs/heads/master", "refs/heads/newmaster");
RefUpdate updateRef = db.updateRef("refs/heads/master");
updateRef.setNewObjectId(prevId);
updateRef.setForceUpdate(true);
assertEquals(FORCED, updateRef.update());
assertEquals(RefUpdate.Result.LOCK_FAILURE, rename.rename());
}
@Test
public void testRenameSymref() throws IOException {
db.resolve("HEAD");
RefRename r = db.renameRef("HEAD", "KOPF");
assertEquals(IO_FAILURE, r.rename());
}
@Test
public void testRenameRefNameColission1avoided() throws IOException {
// setup

6  org.eclipse.jgit/resources/org/eclipse/jgit/internal/JGitText.properties

@@ -282,11 +282,13 @@ expectedReceivedContentType=expected Content-Type {0}; received Content-Type {1}
expectedReportForRefNotReceived={0}: expected report for ref {1} not received
failedAtomicFileCreation=Atomic file creation failed, number of hard links to file {0} was not 2 but {1}
failedCreateLockFile=Creating lock file {} failed
failedToConvert=Failed to convert rest: %s
failedToDetermineFilterDefinition=An exception occurred while determining filter definitions
failedUpdatingRefs=failed updating refs
failureDueToOneOfTheFollowing=Failure due to one of the following:
failureUpdatingFETCH_HEAD=Failure updating FETCH_HEAD: {0}
failureUpdatingTrackingRef=Failure updating tracking ref {0}: {1}
fileAlreadyExists=File already exists: {0}
fileCannotBeDeleted=File cannot be deleted: {0}
fileIsTooLarge=File is too large: {0}
fileModeNotSetForPath=FileMode not set for path {0}
@@ -325,6 +327,7 @@ incorrectHashFor=Incorrect hash for {0}; computed {1} as a {2} from {3} bytes.
incorrectOBJECT_ID_LENGTH=Incorrect OBJECT_ID_LENGTH.
indexFileCorruptedNegativeBucketCount=Invalid negative bucket count read from pack v2 index file: {0}
indexFileIsTooLargeForJgit=Index file is too large for jgit
indexNumbersNotIncreasing=index numbers not increasing: ''{0}'': min {1}, last max {2}
indexWriteException=Modified index could not be written
initFailedBareRepoDifferentDirs=When initializing a bare repo with directory {0} and separate git-dir {1} specified both folders must point to the same location
initFailedDirIsNoDirectory=Cannot set directory to ''{0}'' which is not a directory
@@ -552,7 +555,8 @@ refAlreadyExists=already exists
refAlreadyExists1=Ref {0} already exists
reflogEntryNotFound=Entry {0} not found in reflog for ''{1}''
refNotResolved=Ref {0} cannot be resolved
refTableRecordsMustIncrease=records must be increasing: last {0}, this {1}
reftableDirExists=reftable dir exists and is nonempty
reftableRecordsMustIncrease=records must be increasing: last {0}, this {1}
refUpdateReturnCodeWas=RefUpdate return code was: {0}
remoteConfigHasNoURIAssociated=Remote config "{0}" has no URIs associated
remoteDoesNotHaveSpec=Remote does not have {0} available for fetch.

6  org.eclipse.jgit/src/org/eclipse/jgit/internal/JGitText.java

@@ -344,10 +344,12 @@ public class JGitText extends TranslationBundle {
/***/ public String failedAtomicFileCreation;
/***/ public String failedCreateLockFile;
/***/ public String failedToDetermineFilterDefinition;
/***/ public String failedToConvert;
/***/ public String failedUpdatingRefs;
/***/ public String failureDueToOneOfTheFollowing;
/***/ public String failureUpdatingFETCH_HEAD;
/***/ public String failureUpdatingTrackingRef;
/***/ public String fileAlreadyExists;
/***/ public String fileCannotBeDeleted;
/***/ public String fileIsTooLarge;
/***/ public String fileModeNotSetForPath;
@@ -386,6 +388,7 @@ public class JGitText extends TranslationBundle {
/***/ public String incorrectOBJECT_ID_LENGTH;
/***/ public String indexFileCorruptedNegativeBucketCount;
/***/ public String indexFileIsTooLargeForJgit;
/***/ public String indexNumbersNotIncreasing;
/***/ public String indexWriteException;
/***/ public String initFailedBareRepoDifferentDirs;
/***/ public String initFailedDirIsNoDirectory;
@@ -613,7 +616,8 @@ public class JGitText extends TranslationBundle {
/***/ public String refAlreadyExists1;
/***/ public String reflogEntryNotFound;
/***/ public String refNotResolved;
/***/ public String refTableRecordsMustIncrease;
/***/ public String reftableDirExists;
/***/ public String reftableRecordsMustIncrease;
/***/ public String refUpdateReturnCodeWas;
/***/ public String remoteConfigHasNoURIAssociated;
/***/ public String remoteDoesNotHaveSpec;

651  org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/FileReftableDatabase.java

@@ -0,0 +1,651 @@
/*
* Copyright (C) 2019 Google LLC
* and other copyright owners as documented in the project's IP log.
*
* This program and the accompanying materials are made available
* under the terms of the Eclipse Distribution License v1.0 which
* accompanies this distribution, is reproduced below, and is
* available at http://www.eclipse.org/org/documents/edl-v10.php
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* - Neither the name of the Eclipse Foundation, Inc. nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.eclipse.jgit.internal.storage.file;
import static org.eclipse.jgit.lib.Ref.UNDEFINED_UPDATE_INDEX;
import static org.eclipse.jgit.lib.Ref.Storage.NEW;
import static org.eclipse.jgit.lib.Ref.Storage.PACKED;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
import org.eclipse.jgit.annotations.NonNull;
import org.eclipse.jgit.events.RefsChangedEvent;
import org.eclipse.jgit.internal.storage.reftable.MergedReftable;
import org.eclipse.jgit.internal.storage.reftable.ReftableBatchRefUpdate;
import org.eclipse.jgit.internal.storage.reftable.ReftableDatabase;
import org.eclipse.jgit.internal.storage.reftable.ReftableWriter;
import org.eclipse.jgit.lib.BatchRefUpdate;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectIdRef;
import org.eclipse.jgit.lib.PersonIdent;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.RefDatabase;
import org.eclipse.jgit.lib.RefRename;
import org.eclipse.jgit.lib.RefUpdate;
import org.eclipse.jgit.lib.ReflogEntry;
import org.eclipse.jgit.lib.ReflogReader;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.lib.SymbolicRef;
import org.eclipse.jgit.revwalk.RevObject;
import org.eclipse.jgit.revwalk.RevTag;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.transport.ReceiveCommand;
import org.eclipse.jgit.util.FileUtils;
import org.eclipse.jgit.util.RefList;
import org.eclipse.jgit.util.RefMap;
/**
* Implements RefDatabase using reftable for storage.
*
* This class is threadsafe.
*/
public class FileReftableDatabase extends RefDatabase {
private final ReftableDatabase reftableDatabase;
private final FileRepository fileRepository;
private final FileReftableStack reftableStack;
FileReftableDatabase(FileRepository repo, File refstackName) throws IOException {
this.fileRepository = repo;
this.reftableStack = new FileReftableStack(refstackName,
new File(fileRepository.getDirectory(), Constants.REFTABLE),
() -> fileRepository.fireEvent(new RefsChangedEvent()),
() -> fileRepository.getConfig());
this.reftableDatabase = new ReftableDatabase() {
@Override
public MergedReftable openMergedReftable() throws IOException {
return reftableStack.getMergedReftable();
}
};
}
ReflogReader getReflogReader(String refname) throws IOException {
return reftableDatabase.getReflogReader(refname);
}
/**
* @param repoDir
* @return whether the given repo uses reftable for refdb storage.
*/
public static boolean isReftable(File repoDir) {
return new File(repoDir, "refs").isFile() //$NON-NLS-1$
&& new File(repoDir, Constants.REFTABLE).isDirectory();
}
/**
* Runs a full compaction for GC purposes.
* @throws IOException on I/O errors
*/
public void compactFully() throws IOException {
reftableDatabase.getLock().lock();
try {
reftableStack.compactFully();
} finally {
reftableDatabase.getLock().unlock();
}
}
private ReentrantLock getLock() {
return reftableDatabase.getLock();
}
/** {@inheritDoc} */
@Override
public boolean performsAtomicTransactions() {
return true;
}
/** {@inheritDoc} */
@NonNull
@Override
public BatchRefUpdate newBatchUpdate() {
return new FileReftableBatchRefUpdate(this, fileRepository);
}
/** {@inheritDoc} */
@Override
public RefUpdate newUpdate(String refName, boolean detach)
throws IOException {
boolean detachingSymbolicRef = false;
Ref ref = exactRef(refName);
if (ref == null) {
ref = new ObjectIdRef.Unpeeled(NEW, refName, null);
} else {
detachingSymbolicRef = detach && ref.isSymbolic();
}
RefUpdate update = new FileReftableRefUpdate(ref);
if (detachingSymbolicRef) {
update.setDetachingSymbolicRef();
}
return update;
}
/** {@inheritDoc} */
@Override
public Ref exactRef(String name) throws IOException {
return reftableDatabase.exactRef(name);
}
/** {@inheritDoc} */
@Override
public List<Ref> getRefs() throws IOException {
return super.getRefs();
}
/** {@inheritDoc} */
@Override
public Map<String, Ref> getRefs(String prefix) throws IOException {
List<Ref> refs = reftableDatabase.getRefsByPrefix(prefix);
RefList.Builder<Ref> builder = new RefList.Builder<>(refs.size());
for (Ref r : refs) {
builder.add(r);
}
return new RefMap(prefix, builder.toRefList(), RefList.emptyList(),
RefList.emptyList());
}
/** {@inheritDoc} */
@Override
public List<Ref> getAdditionalRefs() throws IOException {
return Collections.emptyList();
}
/** {@inheritDoc} */
@Override
public Ref peel(Ref ref) throws IOException {
Ref oldLeaf = ref.getLeaf();
if (oldLeaf.isPeeled() || oldLeaf.getObjectId() == null) {
return ref;
}
return recreate(ref, doPeel(oldLeaf), hasVersioning());
}
private Ref doPeel(Ref leaf) throws IOException {
try (RevWalk rw = new RevWalk(fileRepository)) {
RevObject obj = rw.parseAny(leaf.getObjectId());
if (obj instanceof RevTag) {
return new ObjectIdRef.PeeledTag(leaf.getStorage(),
leaf.getName(), leaf.getObjectId(), rw.peel(obj).copy(),
hasVersioning() ? leaf.getUpdateIndex()
: UNDEFINED_UPDATE_INDEX);
}
return new ObjectIdRef.PeeledNonTag(leaf.getStorage(),
leaf.getName(), leaf.getObjectId(),
hasVersioning() ? leaf.getUpdateIndex()
: UNDEFINED_UPDATE_INDEX);
}
}
private static Ref recreate(Ref old, Ref leaf, boolean hasVersioning) {
if (old.isSymbolic()) {
Ref dst = recreate(old.getTarget(), leaf, hasVersioning);
return new SymbolicRef(old.getName(), dst,
hasVersioning ? old.getUpdateIndex()
: UNDEFINED_UPDATE_INDEX);
}
return leaf;
}
private class FileRefRename extends RefRename {
FileRefRename(RefUpdate src, RefUpdate dst) {
super(src, dst);
}
void writeRename(ReftableWriter w) throws IOException {
long idx = reftableDatabase.nextUpdateIndex();
w.setMinUpdateIndex(idx).setMaxUpdateIndex(idx).begin();
List<Ref> refs = new ArrayList<>(3);
Ref dest = destination.getRef();
Ref head = exactRef(Constants.HEAD);
if (head != null && head.isSymbolic()
&& head.getLeaf().getName().equals(source.getName())) {
head = new SymbolicRef(Constants.HEAD, dest, idx);
refs.add(head);
}
ObjectId objId = source.getRef().getObjectId();
// XXX should we check if the source is a Tag vs. NonTag?
refs.add(new ObjectIdRef.PeeledNonTag(Ref.Storage.NEW,
destination.getName(), objId));
refs.add(new ObjectIdRef.Unpeeled(Ref.Storage.NEW, source.getName(),
null));
w.sortAndWriteRefs(refs);
PersonIdent who = destination.getRefLogIdent();
if (who == null) {
who = new PersonIdent(fileRepository);
}
if (!destination.getRefLogMessage().isEmpty()) {
List<String> refnames = refs.stream().map(r -> r.getName())
.collect(Collectors.toList());
Collections.sort(refnames);
for (String s : refnames) {
ObjectId old = (Constants.HEAD.equals(s)
|| s.equals(source.getName())) ? objId
: ObjectId.zeroId();
ObjectId newId = (Constants.HEAD.equals(s)
|| s.equals(destination.getName())) ? objId
: ObjectId.zeroId();
w.writeLog(s, idx, who, old, newId,
destination.getRefLogMessage());
}
}
}
@Override
protected RefUpdate.Result doRename() throws IOException {
Ref src = exactRef(source.getName());
if (exactRef(destination.getName()) != null || src == null
|| !source.getOldObjectId().equals(src.getObjectId())) {
return RefUpdate.Result.LOCK_FAILURE;
}
if (src.isSymbolic()) {
// We could support this, but this is easier and compatible.
return RefUpdate.Result.IO_FAILURE;
}
if (!addReftable(this::writeRename)) {
return RefUpdate.Result.LOCK_FAILURE;
}
return RefUpdate.Result.RENAMED;
}
}
/** {@inheritDoc} */
@Override
public RefRename newRename(String fromName, String toName)
throws IOException {
RefUpdate src = newUpdate(fromName, true);
RefUpdate dst = newUpdate(toName, true);
return new FileRefRename(src, dst);
}
/** {@inheritDoc} */
@Override
public boolean isNameConflicting(String name) throws IOException {
return reftableDatabase.isNameConflicting(name, new TreeSet<>(),
new HashSet<>());
}
/** {@inheritDoc} */
@Override
public void close() {
reftableStack.close();
}
/** {@inheritDoc} */
@Override
public void create() throws IOException {
FileUtils.mkdir(
new File(fileRepository.getDirectory(), Constants.REFTABLE),
true);
}
private boolean addReftable(FileReftableStack.Writer w) throws IOException {
if (!reftableStack.addReftable(w)) {
reftableStack.reload();
reftableDatabase.clearCache();
return false;
}
reftableDatabase.clearCache();
return true;
}
private class FileReftableBatchRefUpdate extends ReftableBatchRefUpdate {
FileReftableBatchRefUpdate(FileReftableDatabase db,
Repository repository) {
super(db, db.reftableDatabase, db.getLock(), repository);
}
@Override
protected void applyUpdates(List<Ref> newRefs,
List<ReceiveCommand> pending) throws IOException {
if (!addReftable(rw -> write(rw, newRefs, pending))) {
for (ReceiveCommand c : pending) {
if (c.getResult() == ReceiveCommand.Result.NOT_ATTEMPTED) {
c.setResult(RefUpdate.Result.LOCK_FAILURE);
}
}
}
}
}
private class FileReftableRefUpdate extends RefUpdate {
FileReftableRefUpdate(Ref ref) {
super(ref);
}
@Override
protected RefDatabase getRefDatabase() {
return FileReftableDatabase.this;
}
@Override
protected Repository getRepository() {
return FileReftableDatabase.this.fileRepository;
}
@Override
protected void unlock() {
// nop.
}
private RevWalk rw;
private Ref dstRef;
@Override
public Result update(RevWalk walk) throws IOException {
try {
rw = walk;
return super.update(walk);
} finally {
rw = null;
}
}
@Override
protected boolean tryLock(boolean deref) throws IOException {
dstRef = getRef();
if (deref) {
dstRef = dstRef.getLeaf();
}
Ref derefed = exactRef(dstRef.getName());
if (derefed != null) {
setOldObjectId(derefed.getObjectId());
}
return true;
}
void writeUpdate(ReftableWriter w) throws IOException {
Ref newRef = null;
if (rw != null && !ObjectId.zeroId().equals(getNewObjectId())) {
RevObject obj = rw.parseAny(getNewObjectId());
if (obj instanceof RevTag) {
newRef = new ObjectIdRef.PeeledTag(Ref.Storage.PACKED,
dstRef.getName(), getNewObjectId(),
rw.peel(obj).copy());
}
}
if (newRef == null) {
newRef = new ObjectIdRef.PeeledNonTag(Ref.Storage.PACKED,
dstRef.getName(), getNewObjectId());
}
long idx = reftableDatabase.nextUpdateIndex();
w.setMinUpdateIndex(idx).setMaxUpdateIndex(idx).begin()
.writeRef(newRef);
ObjectId oldId = getOldObjectId();
if (oldId == null) {
oldId = ObjectId.zeroId();
}
w.writeLog(dstRef.getName(), idx, getRefLogIdent(), oldId,
getNewObjectId(), getRefLogMessage());
}
@Override
public PersonIdent getRefLogIdent() {
PersonIdent who = super.getRefLogIdent();
if (who == null) {
who = new PersonIdent(getRepository());
}
return who;
}
void writeDelete(ReftableWriter w) throws IOException {
Ref newRef = new ObjectIdRef.Unpeeled(Ref.Storage.NEW,
dstRef.getName(), null);
long idx = reftableDatabase.nextUpdateIndex();
w.setMinUpdateIndex(idx).setMaxUpdateIndex(idx).begin()
.writeRef(newRef);
ObjectId oldId = ObjectId.zeroId();
Ref old = exactRef(dstRef.getName());
if (old != null) {
old = old.getLeaf();
if (old.getObjectId() != null) {
oldId = old.getObjectId();
}
}
w.writeLog(dstRef.getName(), idx, getRefLogIdent(), oldId,
ObjectId.zeroId(), getRefLogMessage());
}
@Override
protected Result doUpdate(Result desiredResult) throws IOException {
if (isRefLogIncludingResult()) {
setRefLogMessage(
getRefLogMessage() + ": " + desiredResult.toString(), //$NON-NLS-1$
false);
}
if (!addReftable(this::writeUpdate)) {
return Result.LOCK_FAILURE;
}
return desiredResult;
}
@Override
protected Result doDelete(Result desiredResult) throws IOException {
if (isRefLogIncludingResult()) {
setRefLogMessage(
getRefLogMessage() + ": " + desiredResult.toString(), //$NON-NLS-1$
false);
}
if (!addReftable(this::writeDelete)) {
return Result.LOCK_FAILURE;
}
return desiredResult;
}
void writeLink(ReftableWriter w) throws IOException {
long idx = reftableDatabase.nextUpdateIndex();
w.setMinUpdateIndex(idx).setMaxUpdateIndex(idx).begin()
.writeRef(dstRef);
ObjectId beforeId = ObjectId.zeroId();
Ref before = exactRef(dstRef.getName());
if (before != null) {
before = before.getLeaf();
if (before.getObjectId() != null) {
beforeId = before.getObjectId();
}
}
Ref after = dstRef.getLeaf();
ObjectId afterId = ObjectId.zeroId();
if (after.getObjectId() != null) {
afterId = after.getObjectId();
}
w.writeLog(dstRef.getName(), idx, getRefLogIdent(), beforeId,
afterId, getRefLogMessage());
}
@Override
protected Result doLink(String target) throws IOException {
if (isRefLogIncludingResult()) {
setRefLogMessage(
getRefLogMessage() + ": " + Result.FORCED.toString(), //$NON-NLS-1$
false);
}
boolean exists = exactRef(getName()) != null;
dstRef = new SymbolicRef(getName(),
new ObjectIdRef.Unpeeled(Ref.Storage.NEW, target, null),
reftableDatabase.nextUpdateIndex());
if (!addReftable(this::writeLink)) {
return Result.LOCK_FAILURE;
}
// XXX unclear if we should support FORCED here. The base class says
// NEW is OK?
return exists ? Result.FORCED : Result.NEW;
}
}
private static void writeConvertTable(Repository repo, ReftableWriter w,
boolean writeLogs) throws IOException {
int size = 0;
List<Ref> refs = repo.getRefDatabase().getRefs();
if (writeLogs) {
for (Ref r : refs) {
ReflogReader rlr = repo.getReflogReader(r.getName());
if (rlr != null) {
size = Math.max(rlr.getReverseEntries().size(), size);
}
}
}
// We must use 1 here: nextUpdateIndex() on an empty stack is 1.
w.setMinUpdateIndex(1).setMaxUpdateIndex(size + 1).begin();
// The spec says to write the logs in the first table, and put refs in a
// separate table, but this complicates the compaction (when can we drop
// deletions? Can we compact the .log table and the .ref table together?)
try (RevWalk rw = new RevWalk(repo)) {
List<Ref> toWrite = new ArrayList<>(refs.size());
for (Ref r : refs) {
toWrite.add(refForWrite(rw, r));
}
w.sortAndWriteRefs(toWrite);
}
if (writeLogs) {
for (Ref r : refs) {
long idx = size;
ReflogReader reader = repo.getReflogReader(r.getName());
if (reader == null) {
continue;
}
for (ReflogEntry e : reader.getReverseEntries()) {
w.writeLog(r.getName(), idx, e.getWho(), e.getOldId(),
e.getNewId(), e.getComment());
idx--;
}
}
}
}
private static Ref refForWrite(RevWalk rw, Ref r) throws IOException {
if (r.isSymbolic()) {
return new SymbolicRef(r.getName(), new ObjectIdRef.Unpeeled(NEW,
r.getTarget().getName(), null));
}
ObjectId newId = r.getObjectId();
RevObject obj = rw.parseAny(newId);
RevObject peel = null;
if (obj instanceof RevTag) {
peel = rw.peel(obj);
}
if (peel != null) {
return new ObjectIdRef.PeeledTag(PACKED, r.getName(), newId,
peel.copy());
}
return new ObjectIdRef.PeeledNonTag(PACKED, r.getName(), newId);
}
/**
* Converts an existing repository's ref database into a reftable stack.
*
* @param repo
* the repository to convert
* @param refstackName
* the file holding the list of reftables (the stack)
* @param writeLogs
* whether to write reflogs
* @return a reftable based RefDB from an existing repository; may be null.
* @throws IOException
* on IO error
*/
public static FileReftableDatabase convertFrom(FileRepository repo,
File refstackName, boolean writeLogs) throws IOException {
FileReftableDatabase newDb = null;
try {
File reftableDir = new File(repo.getDirectory(), Constants.REFTABLE);
if (!reftableDir.isDirectory()) {
reftableDir.mkdir();
}
try (FileReftableStack stack = new FileReftableStack(refstackName,
reftableDir, null, () -> repo.getConfig())) {
stack.addReftable(rw -> writeConvertTable(repo, rw, writeLogs));
}
refstackName = null;
} finally {
if (refstackName != null) {
refstackName.delete();
}
}
return newDb;
}
}

768
org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/FileReftableStack.java

@ -0,0 +1,768 @@
/*
* Copyright (C) 2019 Google LLC
* and other copyright owners as documented in the project's IP log.
*
* This program and the accompanying materials are made available
* under the terms of the Eclipse Distribution License v1.0 which
* accompanies this distribution, is reproduced below, and is
* available at http://www.eclipse.org/org/documents/edl-v10.php
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* - Neither the name of the Eclipse Foundation, Inc. nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.eclipse.jgit.internal.storage.file;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.eclipse.jgit.annotations.Nullable;
import org.eclipse.jgit.errors.LockFailedException;
import org.eclipse.jgit.internal.storage.io.BlockSource;
import org.eclipse.jgit.internal.storage.reftable.MergedReftable;
import org.eclipse.jgit.internal.storage.reftable.ReftableCompactor;
import org.eclipse.jgit.internal.storage.reftable.ReftableConfig;
import org.eclipse.jgit.internal.storage.reftable.ReftableReader;
import org.eclipse.jgit.internal.storage.reftable.ReftableWriter;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.util.FileUtils;
/**
* A mutable stack of reftables on local filesystem storage. Not thread-safe.
* This is an AutoCloseable because this object owns the file handles to the
* open reftables.
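* <p>
* A minimal usage sketch (illustrative only; {@code gitDir} and the plain
* {@code Config} are assumptions, not requirements of this class):
*
* <pre>{@code
* File stackFile = new File(gitDir, "refs");
* File tableDir = new File(gitDir, "reftable");
* try (FileReftableStack stack = new FileReftableStack(stackFile, tableDir,
*         null, () -> new Config())) {
*     MergedReftable merged = stack.getMergedReftable();
*     // read refs and reflogs through the merged view here
* }
* }</pre>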
*/
public class FileReftableStack implements AutoCloseable {
private static class StackEntry {
String name;
ReftableReader reftableReader;
}
private MergedReftable mergedReftable;
private List<StackEntry> stack;
private long lastNextUpdateIndex;
private final File stackPath;
private final File reftableDir;
private final Runnable onChange;
private final Supplier<Config> configSupplier;
// Used for stats & testing.
static class CompactionStats {
long tables;
long bytes;
int attempted;
int failed;
long refCount;
long logCount;
CompactionStats() {
tables = 0;
bytes = 0;
attempted = 0;
failed = 0;
logCount = 0;
refCount = 0;
}
}
private final CompactionStats stats;
/**
* Creates a stack corresponding to the list of reftables named in the stack file.
*
* @param stackPath
* the filename for the stack.
* @param reftableDir
* the dir holding the tables.
* @param onChange
* hook to call if we notice a new write
* @param configSupplier
* Config supplier
* @throws IOException
* on I/O problems
*/
public FileReftableStack(File stackPath, File reftableDir,
@Nullable Runnable onChange, Supplier<Config> configSupplier)
throws IOException {
this.stackPath = stackPath;
this.reftableDir = reftableDir;
this.stack = new ArrayList<>();
this.configSupplier = configSupplier;
this.onChange = onChange;
// skip event notification
lastNextUpdateIndex = 0;
reload();
stats = new CompactionStats();
}
CompactionStats getStats() {
return stats;
}
/** Thrown if the update indices in the stack are not monotonic */
public static class ReftableNumbersNotIncreasingException
extends RuntimeException {
private static final long serialVersionUID = 1L;
String name;
long lastMax;
long min;
ReftableNumbersNotIncreasingException(String name, long lastMax,
long min) {
this.name = name;
this.lastMax = lastMax;
this.min = min;
}
}
/**
* Reloads the stack, potentially reusing opened reftableReaders.
*
* @param names
* holds the names of the tables to load.
* @throws FileNotFoundException
* load must be retried.
* @throws IOException
* on other IO errors.
*/
private void reloadOnce(List<String> names)
throws IOException, FileNotFoundException {
Map<String, ReftableReader> current = stack.stream()
.collect(Collectors.toMap(e -> e.name, e -> e.reftableReader));
List<ReftableReader> newTables = new ArrayList<>();
List<StackEntry> newStack = new ArrayList<>(stack.size() + 1);
try {
ReftableReader last = null;
for (String name : names) {
StackEntry entry = new StackEntry();
entry.name = name;
ReftableReader t = null;
if (current.containsKey(name)) {
t = current.remove(name);
} else {
File subtable = new File(reftableDir, name);
FileInputStream is;
is = new FileInputStream(subtable);
t = new ReftableReader(BlockSource.from(is));
newTables.add(t);
}
if (last != null) {
// TODO: move this to MergedReftable
if (last.maxUpdateIndex() >= t.minUpdateIndex()) {
throw new ReftableNumbersNotIncreasingException(name,
last.maxUpdateIndex(), t.minUpdateIndex());
}
}
last = t;
entry.reftableReader = t;
newStack.add(entry);
}
// survived without exceptions: swap in new stack, and close
// dangling tables.
stack = newStack;
newTables.clear();
current.values().forEach(r -> {
try {
r.close();
} catch (IOException e) {
throw new AssertionError(e);
}
});
} finally {
newTables.forEach(t -> {
try {
t.close();
} catch (IOException ioe) {
// reader close should not generate errors.
throw new AssertionError(ioe);
}
});
}
}
void reload() throws IOException {
// Try for 2.5 seconds.
long deadline = System.currentTimeMillis() + 2500;
// A successful reftable transaction is 2 atomic file writes (each an
// open, write, close, rename sequence), which a fast Linux system should
// be able to do in about 200 microseconds. So 1 ms should be ample time.
long min = 1;
long max = 1000;
long delay = 0;
boolean success = false;
while (System.currentTimeMillis() < deadline) {
List<String> names = readTableNames();
try {
reloadOnce(names);
success = true;
break;
} catch (FileNotFoundException e) {
List<String> changed = readTableNames();
if (changed.equals(names)) {
throw e;
}
}
delay = FileUtils.delay(delay, min, max);
try {
Thread.sleep(delay);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
if (!success) {
// TODO: should reexamine the 'refs' file to see if it changed. If it
// didn't change, then we must have corruption; if it did change,
// retry.
throw new LockFailedException(stackPath);
}
mergedReftable = new MergedReftable(stack.stream()
.map(x -> x.reftableReader).collect(Collectors.toList()));
long curr = nextUpdateIndex();
if (lastNextUpdateIndex > 0 && lastNextUpdateIndex != curr
&& onChange != null) {
onChange.run();
}
lastNextUpdateIndex = curr;
}
/**
* @return the merged reftable
*/
public MergedReftable getMergedReftable() {
return mergedReftable;
}
/**
* Writer is a callable that writes data to a reftable under construction.
* It should set the min/max update index, and then write refs and/or logs.
* It should not call finish() on the writer.
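* <p>
* A minimal usage sketch (illustrative only; {@code stack} and {@code ref}
* are assumed to already exist):
*
* <pre>{@code
* boolean ok = stack.addReftable(w -> {
*     long idx = 1; // in real code, derive this from the stack
*     w.setMinUpdateIndex(idx).setMaxUpdateIndex(idx).begin();
*     w.writeRef(ref);
* });
* }</pre>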
*/
public interface Writer {
/**
* Write data to reftable
*
* @param w
* writer to use
* @throws IOException
* on I/O problems while writing to the reftable
*/
void call(ReftableWriter w) throws IOException;
}
private List<String> readTableNames() throws IOException {
List<String> names = new ArrayList<>(stack.size() + 1);
try (BufferedReader br = new BufferedReader(
new InputStreamReader(new FileInputStream(stackPath), UTF_8))) {
String line;
while ((line = br.readLine()) != null) {
if (!line.isEmpty()) {
names.add(line);
}
}
} catch (FileNotFoundException e) {
// file isn't there: empty repository.
}
return names;
}
/**
* @return true if the on-disk file corresponds to the in-memory data.
* @throws IOException
* on IO problem
*/
boolean isUpToDate() throws IOException {
// We could use FileSnapshot to avoid reading the file, but the file is
// small so it's probably a minor optimization.
try {
List<String> names = readTableNames();
if (names.size() != stack.size()) {
return false;
}
for (int i = 0; i < names.size(); i++) {
if (!names.get(i).equals(stack.get(i).name)) {
return false;
}
}
} catch (FileNotFoundException e) {
return stack.isEmpty();
}
return true;
}
/**
* {@inheritDoc}
*/
@Override
public void close() {
for (StackEntry entry : stack) {
try {
entry.reftableReader.close();
} catch (Exception e) {
// we are reading; this should never fail.
throw new AssertionError(e);
}
}
}
private long nextUpdateIndex() throws IOException {
return stack.size() > 0
? stack.get(stack.size() - 1).reftableReader.maxUpdateIndex()
+ 1
: 1;
}
private String filename(long low, long high) {
return String.format("%012x-%012x", //$NON-NLS-1$
Long.valueOf(low), Long.valueOf(high));
}
/**
* Tries to add a new reftable to the stack. Returns true if it succeeded,
* or false if there was a lock failure, due to races with other processes.
* This is exposed so FileReftableDatabase can call into it.
*
* @param w
* writer to write data to a reftable under construction
* @return true if the transaction was successful.
* @throws IOException
* on I/O problems
*/
@SuppressWarnings("nls")
public boolean addReftable(Writer w) throws IOException {
LockFile lock = new LockFile(stackPath);
try {
if (!lock.lockForAppend()) {
return false;
}
if (!isUpToDate()) {
return false;
}
String fn = filename(nextUpdateIndex(), nextUpdateIndex());
File tmpTable = File.createTempFile(fn + "_", ".ref",
stackPath.getParentFile());
ReftableWriter.Stats s;
try (FileOutputStream fos = new FileOutputStream(tmpTable)) {
ReftableWriter rw = new ReftableWriter(reftableConfig(), fos);
w.call(rw);
rw.finish();
s = rw.getStats();
}
if (s.minUpdateIndex() < nextUpdateIndex()) {
return false;
}
// The spec says to name log-only files with .log, which is somewhat
// pointless given compaction, but we do so anyway.
fn += s.refCount() > 0 ? ".ref" : ".log";
File dest = new File(reftableDir, fn);
FileUtils.rename(tmpTable, dest, StandardCopyOption.ATOMIC_MOVE);
lock.write((fn + "\n").getBytes(UTF_8));
if (!lock.commit()) {
FileUtils.delete(dest);
return false;
}
reload();
autoCompact();
} finally {
lock.unlock();
}
return true;
}
private ReftableConfig reftableConfig() {
return new ReftableConfig(configSupplier.get());
}
/**
* Write the reftable for the given range into a temp file.
*
* @param first
* index of first stack entry to be written
* @param last
* index of last stack entry to be written
* @return the file holding the replacement table.
* @throws IOException
* on I/O problem
*/
private File compactLocked(int first, int last) throws IOException {
String fn = filename(first, last);
File tmpTable = File.createTempFile(fn + "_", ".ref", //$NON-NLS-1$//$NON-NLS-2$
stackPath.getParentFile());
try (FileOutputStream fos = new FileOutputStream(tmpTable)) {
ReftableCompactor c = new ReftableCompactor(fos)
.setConfig(reftableConfig())
.setMinUpdateIndex(
stack.get(first).reftableReader.minUpdateIndex())
.setMaxUpdateIndex(
stack.get(last).reftableReader.maxUpdateIndex())
.setIncludeDeletes(first > 0);
List<ReftableReader> compactMe = new ArrayList<>();
long totalBytes = 0;
for (int i = first; i <= last; i++) {
compactMe.add(stack.get(i).reftableReader);
totalBytes += stack.get(i).reftableReader.size();
}
c.addAll(compactMe);
c.compact();
// Even if the compaction does not ultimately succeed, we keep the
// tally here, as the effort has already been expended.
stats.bytes += totalBytes;
stats.tables += last - first + 1;
stats.attempted++;
stats.refCount += c.getStats().refCount();
stats.logCount += c.getStats().logCount();
}
return tmpTable;
}
/**
* Compacts a range of the stack, following the file locking protocol
* documented in the spec.
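* <p>
* In outline (a sketch of the flow implemented below): lock the stack
* file, lock every table in the range, release the stack lock while the
* compacted table is written to a temporary file, then re-take the stack
* lock, re-check that the stack is unchanged, and commit the new table
* list.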
*
* @param first
* index of first stack entry to be considered in compaction
* @param last
* index of last stack entry to be considered in compaction
* @return true if a compaction was successfully applied.
* @throws IOException
* on I/O problem
*/
boolean compactRange(int first, int last) throws IOException {
if (first >= last) {
return true;
}
LockFile lock = new LockFile(stackPath);
File tmpTable = null;
List<LockFile> subtableLocks = new ArrayList<>();
try {
if (!lock.lock()) {
return false;
}
if (!isUpToDate()) {
return false;
}
List<File> deleteOnSuccess = new ArrayList<>();
for (int i = first; i <= last; i++) {
File f = new File(reftableDir, stack.get(i).name);
LockFile lf = new LockFile(f);
if (!lf.lock()) {
return false;
}
subtableLocks.add(lf);
deleteOnSuccess.add(f);
}
lock.unlock();
lock = null;
tmpTable = compactLocked(first, last);
lock = new LockFile(stackPath);
if (!lock.lock()) {
return false;
}
if (!isUpToDate()) {
return false;
}
String fn = filename(
stack.get(first).reftableReader.minUpdateIndex(),
stack.get(last).reftableReader.maxUpdateIndex());
// The spec suggests using .log for log-only tables, and collecting
// all log entries in a single file at the bottom of the stack. That would
// require supporting overlapping ranges for the different tables. For the
// sake of simplicity, we ignore this and always produce a combined
// log + ref table.
fn += ".ref"; //$NON-NLS-1$
File dest = new File(reftableDir, fn);
FileUtils.rename(tmpTable, dest, StandardCopyOption.ATOMIC_MOVE);
tmpTable = null;
StringBuilder sb = new StringBuilder();
for (int i = 0; i < first; i++) {
sb.append(stack.get(i).name + "\n"); //$NON-NLS-1$
}
sb.append(fn + "\n"); //$NON-NLS-1$
for (int i = last + 1; i < stack.size(); i++) {
sb.append(stack.get(i).name + "\n"); //$NON-NLS-1$
}
lock.write(sb.toString().getBytes(UTF_8));
if (!lock.commit()) {
dest.delete();
return false;
}
for (File f : deleteOnSuccess) {
Files.delete(f.toPath());
}
reload();
return true;
} finally {
if (tmpTable != null) {
tmpTable.delete();
}
for (LockFile lf : subtableLocks) {
lf.unlock();
}
if (lock != null) {
lock.unlock();
}
}
}
/**
* Calculate an approximate log2, i.e. the floor of the base-2 logarithm;
* for example, log(8) == 3 and log(1023) == 9.
*
* @param sz
* the size; must be positive
* @return the approximate log2 of sz
*/
static int log(long sz) {
long base = 2;
if (sz <= 0) {
throw new IllegalArgumentException("log2 negative"); //$NON-NLS-1$
}
int l = 0;
while (sz > 0) {
l++;
sz /= base;
}
return l - 1;
}
/**
* A segment is a consecutive list of reftables of the same approximate
* size.
*/
static class Segment {
// the approximate log_2 of the size.
int log;
// The total bytes in this segment
long bytes;
int start;
int end; // exclusive.
int size() {
return end - start;
}
Segment(int start, int end, int log, long bytes) {
this.log = log;
this.start = start;
this.end = end;
this.bytes = bytes;
}
Segment() {
this(0, 0, 0, 0);
}
@Override
public int hashCode() {
return 0; // appease error-prone
}
@Override
public boolean equals(Object other) {
Segment o = (Segment) other;
return o.bytes == bytes && o.log == log && o.start == start
&& o.end == end;
}
@SuppressWarnings("boxing")
@Override
public String toString() {
return String.format("{ [%d,%d) l=%d sz=%d }", start, end, log, //$NON-NLS-1$
bytes);
}
}
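// Splits the per-table sizes (ordered oldest to newest) into consecutive
// runs of the same approximate log2 size. For example (hypothetical
// sizes), {4096, 4096, 4, 4} yields two segments: [0,2) at log 12 and
// [2,4) at log 2.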
static List<Segment> segmentSizes(long[] sizes) {
List<Segment> segments = new ArrayList<>();
Segment cur = new Segment();
for (int i = 0; i < sizes.length; i++) {
int l = log(sizes[i]);
if (l != cur.log && cur.bytes > 0) {
segments.add(cur);
cur = new Segment();
cur.start = i;
cur.log = l;
}
cur.log = l;
cur.end = i + 1;
cur.bytes += sizes[i];
}
segments.add(cur);
return segments;
}
private static Optional<Segment> autoCompactCandidate(long[] sizes) {
if (sizes.length == 0) {
return Optional.empty();
}
// The cost of compaction is proportional to the size, and we want to
// avoid frequent large compactions. We do this by playing the game 2048
// here: first compact together the smallest tables, if there is more
// than one. Then check whether the result is big enough to be merged
// with the next size up.
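// Illustrative example (hypothetical sizes): for table sizes
// {4096, 4096, 4, 4}, the two 4-byte tables form the smallest candidate
// segment and are compacted together; because log(4 + 4) is still well
// below log(4096), the candidate is not extended to include the larger
// tables below it.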
List<Segment> segments = segmentSizes(sizes);
segments = segments.stream().filter(s -> s.size() > 1)
.collect(Collectors.toList());
if (segments.isEmpty()) {
return Optional.empty();
}
Optional<Segment> optMinSeg = segments.stream()
.min(Comparator.comparing(s -> Integer.valueOf(s.log)));
// Input is non-empty, so always present.
Segment smallCollected = optMinSeg.get();
while (smallCollected.start > 0) {
int prev = smallCollected.start - 1;
long prevSize = sizes[prev];
if (log(smallCollected.bytes) < log(prevSize)) {
break;
}
smallCollected.start = prev;
smallCollected.bytes += prevSize;
}
return Optional.of(smallCollected);
}
/**
* Heuristically tries to compact the stack if the stack has a suitable
* shape.
*
* @throws IOException
* on I/O problem
*/
private void autoCompact() throws IOException {
Optional<Segment> cand = autoCompactCandidate(tableSizes());
if (cand.isPresent()) {
if (!compactRange(cand.get().start, cand.get().end - 1)) {
stats.failed++;
}
}
}
// Approximate fixed overhead per table: 24-byte header + 68-byte footer.
private static final long OVERHEAD = 91;
private long[] tableSizes() throws IOException {
long[] sizes = new long[stack.size()];
for (int i = 0; i < stack.size(); i++) {
// If we don't subtract the overhead, the file size isn't
// proportional to the number of entries. This will cause us to
// compact too often, which is expensive.
sizes[i] = stack.get(i).reftableReader.size() - OVERHEAD;
}
return sizes;
}
void compactFully() throws IOException {
if (!compactRange(0, stack.size() - 1)) {
stats.failed++;
}
}
}

188
org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/FileRepository.java

@ -52,7 +52,9 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.text.MessageFormat;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Set;
@ -68,10 +70,12 @@ import org.eclipse.jgit.internal.storage.file.ObjectDirectory.AlternateHandle;
import org.eclipse.jgit.internal.storage.file.ObjectDirectory.AlternateRepository;
import org.eclipse.jgit.internal.storage.reftree.RefTreeDatabase;
import org.eclipse.jgit.lib.BaseRepositoryBuilder;
import org.eclipse.jgit.lib.BatchRefUpdate;
import org.eclipse.jgit.lib.ConfigConstants;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.CoreConfig.HideDotFiles;
import org.eclipse.jgit.lib.CoreConfig.SymLinks;
import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.lib.Ref;
@ -80,9 +84,11 @@ import org.eclipse.jgit.lib.RefUpdate;
import org.eclipse.jgit.lib.ReflogReader;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.lib.StoredConfig;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.file.FileBasedConfig;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.ReceiveCommand;
import org.eclipse.jgit.util.FileUtils;
import org.eclipse.jgit.util.IO;
import org.eclipse.jgit.util.RawParseUtils;
@ -121,7 +127,7 @@ public class FileRepository extends Repository {
private static final String UNNAMED = "Unnamed repository; edit this file to name it for gitweb."; //$NON-NLS-1$
private final FileBasedConfig repoConfig;
private final RefDatabase refs;
private RefDatabase refs;
private final ObjectDirectory objectDatabase;
private final Object snapshotLock = new Object();
@ -199,11 +205,16 @@ public class FileRepository extends Repository {
String reftype = repoConfig.getString(
"extensions", null, "refStorage"); //$NON-NLS-1$ //$NON-NLS-2$
if (repositoryFormatVersion >= 1 && reftype != null) {
if (StringUtils.equalsIgnoreCase(reftype, "reftree")) { //$NON-NLS-1$
if (StringUtils.equalsIgnoreCase(reftype, "reftable")) { //$NON-NLS-1$
refs = new FileReftableDatabase(this,
new File(getDirectory(), "refs")); //$NON-NLS-1$
} else if (StringUtils.equalsIgnoreCase(reftype, "reftree")) { //$NON-NLS-1$
refs = new RefTreeDatabase(this, new RefDirectory(this));
} else {
throw new IOException(JGitText.get().unknownRepositoryFormat);
}
} else if (FileReftableDatabase.isReftable(getDirectory())) {
refs = new FileReftableDatabase(this, new File(getDirectory(), "refs")); //$NON-NLS-1$
} else {
refs = new RefDirectory(this);
}
@ -530,10 +541,19 @@ public class FileRepository extends Repository {
/** {@inheritDoc} */
@Override
public ReflogReader getReflogReader(String refName) throws IOException {
if (refs instanceof FileReftableDatabase) {
// Cannot use findRef: reftable stores log data for deleted or renamed
// branches.
return ((FileReftableDatabase)refs).getReflogReader(refName);
}
// TODO: use exactRef here, which offers more predictable and therefore preferable
// behavior.
Ref ref = findRef(refName);
if (ref != null)
return new ReflogReaderImpl(this, ref.getName());
return null;
if (ref == null) {
return null;
}
return new ReflogReaderImpl(this, ref.getName());
}
/** {@inheritDoc} */
@ -613,4 +633,162 @@ public class FileRepository extends Repository {
throw new JGitInternalException(JGitText.get().gcFailed, e);
}
}
/**
* Converts the RefDatabase from reftable to RefDirectory. This operation is
* not atomic.
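* <p>
* In outline (a sketch of the flow implemented below): read all refs from
* the reftable database, close it, move the old "refs" stack file aside,
* create a fresh RefDirectory, batch-write the non-symbolic refs,
* re-create the symbolic refs, and finally remove the reftable/ directory
* unless a backup was requested.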
*
* @param backup
* whether to rename or delete the old storage files. If set to
* true, the reftable list is left in "refs.old", and the
* reftable/ dir is left alone. If set to false, the reftable/
* dir is removed, and "refs" file is removed.
* @throws IOException
* on IO problem
*/
void convertToPackedRefs(boolean backup) throws IOException {
List<Ref> all = refs.getRefs();
File packedRefs = new File(getDirectory(), Constants.PACKED_REFS);
if (packedRefs.exists()) {
throw new IOException(MessageFormat.format(JGitText.get().fileAlreadyExists,
packedRefs.getName()));
}
File refsFile = new File(getDirectory(), "refs"); //$NON-NLS-1$
refs.close();
if (backup) {
File refsOld = new File(getDirectory(), "refs.old"); //$NON-NLS-1$
if (refsOld.exists()) {
throw new IOException(MessageFormat.format(
JGitText.get().fileAlreadyExists,
"refs.old")); //$NON-NLS-1$
}
FileUtils.rename(refsFile, refsOld);
} else {
refsFile.delete();
}
// This is not atomic, but there is no way to instantiate a RefDirectory
// that is disconnected from the current repo.
refs = new RefDirectory(this);
refs.create();
List<Ref> symrefs = new ArrayList<>();
BatchRefUpdate bru = refs.newBatchUpdate();
for (Ref r : all) {
if (r.isSymbolic()) {
symrefs.add(r);
} else {
bru.addCommand(new ReceiveCommand(ObjectId.zeroId(),
r.getObjectId(), r.getName()));
}
}
try (RevWalk rw = new RevWalk(this)) {
bru.execute(rw, NullProgressMonitor.INSTANCE);
}
List<String> failed = new ArrayList<>();
for (ReceiveCommand cmd : bru.getCommands()) {
if (cmd.getResult() != ReceiveCommand.Result.OK) {
failed.add(cmd.getRefName() + ": " + cmd.getResult()); //$NON-NLS-1$
}
}
if (!failed.isEmpty()) {
throw new IOException(String.format("%s: %s", //$NON-NLS-1$
JGitText.get().failedToConvert,
StringUtils.join(failed, ", "))); //$NON-NLS-1$
}
for (Ref s : symrefs) {
RefUpdate up = refs.newUpdate(s.getName(), false);
up.setForceUpdate(true);
RefUpdate.Result res = up.link(s.getTarget().getName());
if (res != RefUpdate.Result.NEW
&& res != RefUpdate.Result.NO_CHANGE) {
throw new IOException(
String.format("ref %s: %s", s.getName(), res)); //$NON-NLS-1$
}
}
if (!backup) {
File reftableDir = new File(getDirectory(), Constants.REFTABLE);
FileUtils.delete(reftableDir,
FileUtils.RECURSIVE | FileUtils.IGNORE_ERRORS);
}
}
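/**
* Converts the RefDatabase from RefDirectory to reftable. This operation
* is not atomic.
*
* @param writeLogs
* whether to convert the existing reflogs as well
* @param backup
* whether to keep the old storage files as *.old backups instead of
* deleting them
* @throws IOException
* on IO problem
*/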
@SuppressWarnings("nls")
void convertToReftable(boolean writeLogs, boolean backup)
throws IOException {
File newRefs = new File(getDirectory(), "refs.new");
File reftableDir = new File(getDirectory(), Constants.REFTABLE);
if (reftableDir.exists() && reftableDir.listFiles().length > 0) {
throw new IOException(JGitText.get().reftableDirExists);
}
// Ignore return value, as it is tied to temporary newRefs file.
FileReftableDatabase.convertFrom(this, newRefs, writeLogs);
File refsFile = new File(getDirectory(), "refs");
// non-atomic: remove old data.
File packedRefs = new File(getDirectory(), Constants.PACKED_REFS);
File logsDir = new File(getDirectory(), Constants.LOGS);
if (backup) {
FileUtils.rename(refsFile, new File(getDirectory(), "refs.old"));
if (packedRefs.exists()) {
FileUtils.rename(packedRefs, new File(getDirectory(),
Constants.PACKED_REFS + ".old"));
}
if (logsDir.exists()) {
FileUtils.rename(logsDir,
new File(getDirectory(), Constants.LOGS + ".old"));
}
} else {
packedRefs.delete(); // ignore return value.
FileUtils.delete(logsDir, FileUtils.RECURSIVE);
FileUtils.delete(refsFile, FileUtils.RECURSIVE);
}
// Put new data.
FileUtils.rename(newRefs, refsFile);
refs.close();
refs = new FileReftableDatabase(this, refsFile);
}
/**
* Converts between ref storage formats.
*
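* A possible invocation (illustrative only):
*
* <pre>{@code
* // repo is an existing FileRepository; convert it to reftable storage,
* // writing reflogs and keeping no backup of the old files.
* repo.convertRefStorage("reftable", true, false);
* }</pre>
*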
* @param format
* the format to convert to, either "reftable" or "refdir"
* @param writeLogs
* whether to write reflogs
* @param backup
* whether to make a backup of the old data
* @throws IOException
* on I/O problems.
*/
@SuppressWarnings("nls")
public void convertRefStorage(String format, boolean writeLogs,
boolean backup) throws IOException {
if (format.equals("reftable")) { //$NON-NLS-1$
if (refs instanceof RefDirectory) {
convertToReftable(writeLogs, backup);
}
} else if (format.equals("refdir")) {//$NON-NLS-1$
if (refs instanceof FileReftableDatabase) {
convertToPackedRefs(backup);
}
} else {
throw new IOException(String.format(
"unknown supported ref storage format '%s'", format));
}
}
}

24
org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/GC.java

@ -771,13 +771,26 @@ public class GC {
}
/**
* Packs all non-symbolic, loose refs into packed-refs.
* Pack ref storage. For a RefDirectory database, this packs all
* non-symbolic, loose refs into packed-refs. For Reftable, all of the data
* is compacted into a single table.
*
* @throws java.io.IOException
*/
public void packRefs() throws IOException {
Collection<Ref> refs = repo.getRefDatabase()
.getRefsByPrefix(Constants.R_REFS);
RefDatabase refDb = repo.getRefDatabase();
if (refDb instanceof FileReftableDatabase) {
// TODO: abstract this more cleanly.
pm.beginTask(JGitText.get().packRefs, 1);
try {
((FileReftableDatabase) refDb).compactFully();
} finally {
pm.endTask();
}
return;
}
Collection<Ref> refs = refDb.getRefsByPrefix(Constants.R_REFS);
List<String> refsToBePacked = new ArrayList<>(refs.size());
pm.beginTask(JGitText.get().packRefs, refs.size());
try {
@ -895,7 +908,10 @@ public class GC {
throw new IOException(e);
}
prunePacked();
deleteEmptyRefsFolders();
if (repo.getRefDatabase() instanceof RefDirectory) {
// TODO: abstract this more cleanly.
deleteEmptyRefsFolders();
}
deleteOrphans();
deleteTempPacksIdx();

2
org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/reftable/ReftableWriter.java

@ -302,7 +302,7 @@ public class ReftableWriter {
private void throwIllegalEntry(Entry last, Entry now) {
throw new IllegalArgumentException(MessageFormat.format(
JGitText.get().refTableRecordsMustIncrease,
JGitText.get().reftableRecordsMustIncrease,
new String(last.key, UTF_8), new String(now.key, UTF_8)));
}

6
org.eclipse.jgit/src/org/eclipse/jgit/lib/Constants.java

@ -281,6 +281,12 @@ public final class Constants {
*/
public static final String OBJECTS = "objects";
/**
* Reftable folder name
* @since 5.6
*/
public static final String REFTABLE = "reftable";
/** Info refs folder */
public static final String INFO_REFS = "info/refs";
