Browse Source
Reftable is a binary, block-based storage format for the ref-database. It provides several advantages over the traditional packed + loose storage format: * O(1) write performance, even for deletions and transactions. * atomic updates to the ref database. * O(log N) lookup and prefix scans * free from restrictions imposed by the file system: it is case-sensitive even on case-insensitive file systems, and has no inherent limitations for directory/file conflicts * prefix compression reduces space usage for repetitive ref names, such as gerrit's refs/changes/xx/xxxxx format. FileReftableDatabase is based on FileReftableStack, which does compactions inline. This is simple, and has good median performance, but every so often it will rewrite the entire ref database. For testing, a FileReftableTest (mirroring RefUpdateTest) is added to check for Reftable specific behavior. This must be done separately, as reflogs have different semantics. Add a reftable flavor of BatchRefUpdateTest. Add a FileReftableStackTest to exercise compaction. Add FileRepository#convertToReftable so existing testdata can be reused. CQ: 21007 Change-Id: I1837f268e91c6b446cb0155061727dbaccb714b8 Signed-off-by: Han-Wen Nienhuys <hanwen@google.com> Signed-off-by: Matthias Sohn <matthias.sohn@sap.com>
Han-Wen Nienhuys
5 years ago
committed by
Matthias Sohn
12 changed files with 2551 additions and 51 deletions
@ -0,0 +1,203 @@ |
|||||||
|
/* |
||||||
|
* Copyright (C) 2019 Google LLC |
||||||
|
* and other copyright owners as documented in the project's IP log. |
||||||
|
* |
||||||
|
* This program and the accompanying materials are made available |
||||||
|
* under the terms of the Eclipse Distribution License v1.0 which |
||||||
|
* accompanies this distribution, is reproduced below, and is |
||||||
|
* available at http://www.eclipse.org/org/documents/edl-v10.php
|
||||||
|
* |
||||||
|
* All rights reserved. |
||||||
|
* |
||||||
|
* Redistribution and use in source and binary forms, with or |
||||||
|
* without modification, are permitted provided that the following |
||||||
|
* conditions are met: |
||||||
|
* |
||||||
|
* - Redistributions of source code must retain the above copyright |
||||||
|
* notice, this list of conditions and the following disclaimer. |
||||||
|
* |
||||||
|
* - Redistributions in binary form must reproduce the above |
||||||
|
* copyright notice, this list of conditions and the following |
||||||
|
* disclaimer in the documentation and/or other materials provided |
||||||
|
* with the distribution. |
||||||
|
* |
||||||
|
* - Neither the name of the Eclipse Foundation, Inc. nor the |
||||||
|
* names of its contributors may be used to endorse or promote |
||||||
|
* products derived from this software without specific prior |
||||||
|
* written permission. |
||||||
|
* |
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND |
||||||
|
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, |
||||||
|
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
||||||
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||||||
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR |
||||||
|
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
||||||
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
||||||
|
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
||||||
|
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||||||
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF |
||||||
|
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||||||
|
*/ |
||||||
|
|
||||||
|
package org.eclipse.jgit.internal.storage.file; |
||||||
|
|
||||||
|
import static org.eclipse.jgit.lib.Ref.Storage.PACKED; |
||||||
|
import static org.junit.Assert.assertEquals; |
||||||
|
import static org.junit.Assert.assertTrue; |
||||||
|
|
||||||
|
import java.io.File; |
||||||
|
import java.io.FileNotFoundException; |
||||||
|
import java.io.IOException; |
||||||
|
import java.util.Arrays; |
||||||
|
import java.util.Collections; |
||||||
|
import java.util.List; |
||||||
|
import java.util.stream.Collectors; |
||||||
|
import org.eclipse.jgit.internal.storage.file.FileReftableStack.Segment; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.MergedReftable; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.RefCursor; |
||||||
|
import org.eclipse.jgit.lib.Config; |
||||||
|
import org.eclipse.jgit.lib.ObjectId; |
||||||
|
import org.eclipse.jgit.lib.ObjectIdRef; |
||||||
|
import org.eclipse.jgit.lib.Ref; |
||||||
|
import org.eclipse.jgit.util.FileUtils; |
||||||
|
import org.junit.After; |
||||||
|
import org.junit.Before; |
||||||
|
import org.junit.Rule; |
||||||
|
import org.junit.Test; |
||||||
|
import org.junit.rules.ExpectedException; |
||||||
|
|
||||||
|
public class FileReftableStackTest { |
||||||
|
|
||||||
|
private static Ref newRef(String name, ObjectId id) { |
||||||
|
return new ObjectIdRef.PeeledNonTag(PACKED, name, id); |
||||||
|
} |
||||||
|
|
||||||
|
private File reftableDir; |
||||||
|
|
||||||
|
@Before |
||||||
|
public void setup() throws Exception { |
||||||
|
reftableDir = FileUtils.createTempDir("rtstack", "", null); |
||||||
|
} |
||||||
|
|
||||||
|
@After |
||||||
|
public void tearDown() throws Exception { |
||||||
|
if (reftableDir != null) { |
||||||
|
FileUtils.delete(reftableDir, FileUtils.RECURSIVE); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
void writeBranches(FileReftableStack stack, String template, int start, |
||||||
|
int N) throws IOException { |
||||||
|
for (int i = 0; i < N; i++) { |
||||||
|
while (true) { |
||||||
|
final long next = stack.getMergedReftable().maxUpdateIndex() |
||||||
|
+ 1; |
||||||
|
|
||||||
|
String name = String.format(template, |
||||||
|
Integer.valueOf(start + i)); |
||||||
|
Ref r = newRef(name, ObjectId.zeroId()); |
||||||
|
boolean ok = stack.addReftable(rw -> { |
||||||
|
rw.setMinUpdateIndex(next).setMaxUpdateIndex(next).begin() |
||||||
|
.writeRef(r); |
||||||
|
}); |
||||||
|
if (ok) { |
||||||
|
break; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
public void testCompaction(int N) throws Exception { |
||||||
|
try (FileReftableStack stack = new FileReftableStack( |
||||||
|
new File(reftableDir, "refs"), reftableDir, null, |
||||||
|
() -> new Config())) { |
||||||
|
writeBranches(stack, "refs/heads/branch%d", 0, N); |
||||||
|
MergedReftable table = stack.getMergedReftable(); |
||||||
|
for (int i = 1; i < N; i++) { |
||||||
|
String name = String.format("refs/heads/branch%d", |
||||||
|
Integer.valueOf(i)); |
||||||
|
RefCursor c = table.seekRef(name); |
||||||
|
assertTrue(c.next()); |
||||||
|
assertEquals(ObjectId.zeroId(), c.getRef().getObjectId()); |
||||||
|
} |
||||||
|
|
||||||
|
List<String> files = Arrays.asList(reftableDir.listFiles()).stream() |
||||||
|
.map(File::getName).collect(Collectors.toList()); |
||||||
|
Collections.sort(files); |
||||||
|
|
||||||
|
assertTrue(files.size() < 20); |
||||||
|
|
||||||
|
FileReftableStack.CompactionStats stats = stack.getStats(); |
||||||
|
assertEquals(0, stats.failed); |
||||||
|
assertTrue(stats.attempted < N); |
||||||
|
assertTrue(stats.refCount < FileReftableStack.log(N) * N); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testCompaction9() throws Exception { |
||||||
|
testCompaction(9); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testCompaction1024() throws Exception { |
||||||
|
testCompaction(1024); |
||||||
|
} |
||||||
|
|
||||||
|
@Rule |
||||||
|
public final ExpectedException thrown = ExpectedException.none(); |
||||||
|
|
||||||
|
@SuppressWarnings({ "resource", "unused" }) |
||||||
|
@Test |
||||||
|
public void missingReftable() throws Exception { |
||||||
|
try (FileReftableStack stack = new FileReftableStack( |
||||||
|
new File(reftableDir, "refs"), reftableDir, null, |
||||||
|
() -> new Config())) { |
||||||
|
outer: for (int i = 0; i < 10; i++) { |
||||||
|
final long next = stack.getMergedReftable().maxUpdateIndex() |
||||||
|
+ 1; |
||||||
|
String name = String.format("branch%d", Integer.valueOf(i)); |
||||||
|
Ref r = newRef(name, ObjectId.zeroId()); |
||||||
|
boolean ok = stack.addReftable(rw -> { |
||||||
|
rw.setMinUpdateIndex(next).setMaxUpdateIndex(next).begin() |
||||||
|
.writeRef(r); |
||||||
|
}); |
||||||
|
assertTrue(ok); |
||||||
|
|
||||||
|
List<File> files = Arrays.asList(reftableDir.listFiles()); |
||||||
|
for (int j = 0; j < files.size(); j++) { |
||||||
|
File f = files.get(j); |
||||||
|
if (f.getName().endsWith(".ref")) { |
||||||
|
assertTrue(f.delete()); |
||||||
|
break outer; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
thrown.expect(FileNotFoundException.class); |
||||||
|
new FileReftableStack(new File(reftableDir, "refs"), reftableDir, null, |
||||||
|
() -> new Config()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testSegments() { |
||||||
|
long in[] = { 1024, 1024, 1536, 100, 64, 50, 25, 24 }; |
||||||
|
List<Segment> got = FileReftableStack.segmentSizes(in); |
||||||
|
Segment want[] = { new Segment(0, 3, 10, 3584), |
||||||
|
new Segment(3, 5, 6, 164), new Segment(5, 6, 5, 50), |
||||||
|
new Segment(6, 8, 4, 49), }; |
||||||
|
assertEquals(got.size(), want.length); |
||||||
|
for (int i = 0; i < want.length; i++) { |
||||||
|
assertTrue(want[i].equals(got.get(i))); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testLog2() throws Exception { |
||||||
|
assertEquals(10, FileReftableStack.log(1024)); |
||||||
|
assertEquals(10, FileReftableStack.log(1025)); |
||||||
|
assertEquals(10, FileReftableStack.log(2047)); |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,553 @@ |
|||||||
|
/* |
||||||
|
* Copyright (C) 2019, Google Inc. |
||||||
|
* and other copyright owners as documented in the project's IP log. |
||||||
|
* |
||||||
|
* This program and the accompanying materials are made available |
||||||
|
* under the terms of the Eclipse Distribution License v1.0 which |
||||||
|
* accompanies this distribution, is reproduced below, and is |
||||||
|
* available at http://www.eclipse.org/org/documents/edl-v10.php
|
||||||
|
* |
||||||
|
* All rights reserved. |
||||||
|
* |
||||||
|
* Redistribution and use in source and binary forms, with or |
||||||
|
* without modification, are permitted provided that the following |
||||||
|
* conditions are met: |
||||||
|
* |
||||||
|
* - Redistributions of source code must retain the above copyright |
||||||
|
* notice, this list of conditions and the following disclaimer. |
||||||
|
* |
||||||
|
* - Redistributions in binary form must reproduce the above |
||||||
|
* copyright notice, this list of conditions and the following |
||||||
|
* disclaimer in the documentation and/or other materials provided |
||||||
|
* with the distribution. |
||||||
|
* |
||||||
|
* - Neither the name of the Eclipse Foundation, Inc. nor the |
||||||
|
* names of its contributors may be used to endorse or promote |
||||||
|
* products derived from this software without specific prior |
||||||
|
* written permission. |
||||||
|
* |
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND |
||||||
|
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, |
||||||
|
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
||||||
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||||||
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR |
||||||
|
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
||||||
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
||||||
|
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
||||||
|
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||||||
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF |
||||||
|
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||||||
|
*/ |
||||||
|
|
||||||
|
package org.eclipse.jgit.internal.storage.file; |
||||||
|
|
||||||
|
import static org.eclipse.jgit.lib.RefUpdate.Result.FAST_FORWARD; |
||||||
|
import static org.eclipse.jgit.lib.RefUpdate.Result.FORCED; |
||||||
|
import static org.eclipse.jgit.lib.RefUpdate.Result.IO_FAILURE; |
||||||
|
import static org.eclipse.jgit.lib.RefUpdate.Result.LOCK_FAILURE; |
||||||
|
import static org.junit.Assert.assertEquals; |
||||||
|
import static org.junit.Assert.assertFalse; |
||||||
|
import static org.junit.Assert.assertNotEquals; |
||||||
|
import static org.junit.Assert.assertNotNull; |
||||||
|
import static org.junit.Assert.assertNotSame; |
||||||
|
import static org.junit.Assert.assertNull; |
||||||
|
import static org.junit.Assert.assertSame; |
||||||
|
import static org.junit.Assert.assertTrue; |
||||||
|
import static org.junit.Assert.fail; |
||||||
|
|
||||||
|
import java.io.File; |
||||||
|
import java.io.IOException; |
||||||
|
import java.util.ArrayList; |
||||||
|
import java.util.List; |
||||||
|
|
||||||
|
import org.eclipse.jgit.lib.AnyObjectId; |
||||||
|
import org.eclipse.jgit.lib.Constants; |
||||||
|
import org.eclipse.jgit.lib.NullProgressMonitor; |
||||||
|
import org.eclipse.jgit.lib.ObjectId; |
||||||
|
import org.eclipse.jgit.lib.PersonIdent; |
||||||
|
import org.eclipse.jgit.lib.Ref; |
||||||
|
import org.eclipse.jgit.lib.RefRename; |
||||||
|
import org.eclipse.jgit.lib.RefUpdate; |
||||||
|
import org.eclipse.jgit.lib.RefUpdate.Result; |
||||||
|
import org.eclipse.jgit.lib.ReflogEntry; |
||||||
|
import org.eclipse.jgit.lib.ReflogReader; |
||||||
|
import org.eclipse.jgit.revwalk.RevWalk; |
||||||
|
import org.eclipse.jgit.test.resources.SampleDataRepositoryTestCase; |
||||||
|
import org.eclipse.jgit.transport.ReceiveCommand; |
||||||
|
import org.junit.Test; |
||||||
|
|
||||||
|
public class FileReftableTest extends SampleDataRepositoryTestCase { |
||||||
|
String bCommit; |
||||||
|
|
||||||
|
@Override |
||||||
|
public void setUp() throws Exception { |
||||||
|
super.setUp(); |
||||||
|
Ref b = db.exactRef("refs/heads/b"); |
||||||
|
bCommit = b.getObjectId().getName(); |
||||||
|
db.convertToReftable(false, false); |
||||||
|
} |
||||||
|
|
||||||
|
@SuppressWarnings("boxing") |
||||||
|
@Test |
||||||
|
public void testRacyReload() throws Exception { |
||||||
|
ObjectId id = db.resolve("master"); |
||||||
|
int retry = 0; |
||||||
|
try (FileRepository repo1 = new FileRepository(db.getDirectory()); |
||||||
|
FileRepository repo2 = new FileRepository(db.getDirectory())) { |
||||||
|
FileRepository repos[] = { repo1, repo2 }; |
||||||
|
for (int i = 0; i < 10; i++) { |
||||||
|
for (int j = 0; j < 2; j++) { |
||||||
|
FileRepository repo = repos[j]; |
||||||
|
RefUpdate u = repo.getRefDatabase().newUpdate( |
||||||
|
String.format("branch%d", i * 10 + j), false); |
||||||
|
|
||||||
|
u.setNewObjectId(id); |
||||||
|
RefUpdate.Result r = u.update(); |
||||||
|
if (!r.equals(Result.NEW)) { |
||||||
|
retry++; |
||||||
|
u = repo.getRefDatabase().newUpdate( |
||||||
|
String.format("branch%d", i * 10 + j), false); |
||||||
|
|
||||||
|
u.setNewObjectId(id); |
||||||
|
r = u.update(); |
||||||
|
assertEquals(r, Result.NEW); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// only the first one succeeds
|
||||||
|
assertEquals(retry, 19); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testCompactFully() throws Exception { |
||||||
|
ObjectId c1 = db.resolve("master^^"); |
||||||
|
ObjectId c2 = db.resolve("master^"); |
||||||
|
for (int i = 0; i < 5; i++) { |
||||||
|
RefUpdate u = db.updateRef("refs/heads/master"); |
||||||
|
u.setForceUpdate(true); |
||||||
|
u.setNewObjectId((i%2) == 0 ? c1 : c2); |
||||||
|
assertEquals(u.update(), FORCED); |
||||||
|
} |
||||||
|
|
||||||
|
File tableDir = new File(db.getDirectory(), Constants.REFTABLE); |
||||||
|
assertTrue(tableDir.listFiles().length > 1); |
||||||
|
((FileReftableDatabase)db.getRefDatabase()).compactFully(); |
||||||
|
assertEquals(tableDir.listFiles().length,1); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testConvert() throws Exception { |
||||||
|
Ref h = db.exactRef("HEAD"); |
||||||
|
assertTrue(h.isSymbolic()); |
||||||
|
assertEquals("refs/heads/master", h.getTarget().getName()); |
||||||
|
|
||||||
|
Ref b = db.exactRef("refs/heads/b"); |
||||||
|
assertFalse(b.isSymbolic()); |
||||||
|
assertTrue(b.isPeeled()); |
||||||
|
assertEquals(bCommit, b.getObjectId().name()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testConvertToRefdir() throws Exception { |
||||||
|
db.convertToPackedRefs(false); |
||||||
|
assertTrue(db.getRefDatabase() instanceof RefDirectory); |
||||||
|
Ref h = db.exactRef("HEAD"); |
||||||
|
assertTrue(h.isSymbolic()); |
||||||
|
assertEquals("refs/heads/master", h.getTarget().getName()); |
||||||
|
|
||||||
|
Ref b = db.exactRef("refs/heads/b"); |
||||||
|
assertFalse(b.isSymbolic()); |
||||||
|
assertTrue(b.isPeeled()); |
||||||
|
assertEquals(bCommit, b.getObjectId().name()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testBatchrefUpdate() throws Exception { |
||||||
|
ObjectId cur = db.resolve("master"); |
||||||
|
ObjectId prev = db.resolve("master^"); |
||||||
|
|
||||||
|
PersonIdent person = new PersonIdent("name", "mail@example.com"); |
||||||
|
ReceiveCommand rc1 = new ReceiveCommand(ObjectId.zeroId(), cur, "refs/heads/batch1"); |
||||||
|
ReceiveCommand rc2 = new ReceiveCommand(ObjectId.zeroId(), prev, "refs/heads/batch2"); |
||||||
|
String msg = "message"; |
||||||
|
try (RevWalk rw = new RevWalk(db)) { |
||||||
|
db.getRefDatabase().newBatchUpdate() |
||||||
|
.addCommand(rc1, rc2) |
||||||
|
.setAtomic(true) |
||||||
|
.setRefLogIdent(person) |
||||||
|
.setRefLogMessage(msg, false) |
||||||
|
.execute(rw, NullProgressMonitor.INSTANCE); |
||||||
|
} |
||||||
|
|
||||||
|
assertEquals(rc1.getResult(), ReceiveCommand.Result.OK); |
||||||
|
assertEquals(rc2.getResult(), ReceiveCommand.Result.OK); |
||||||
|
|
||||||
|
ReflogEntry e = db.getReflogReader("refs/heads/batch1").getLastEntry(); |
||||||
|
assertEquals(msg, e.getComment()); |
||||||
|
assertEquals(person, e.getWho()); |
||||||
|
assertEquals(cur, e.getNewId()); |
||||||
|
|
||||||
|
e = db.getReflogReader("refs/heads/batch2").getLastEntry(); |
||||||
|
assertEquals(msg, e.getComment()); |
||||||
|
assertEquals(person, e.getWho()); |
||||||
|
assertEquals(prev, e.getNewId()); |
||||||
|
|
||||||
|
assertEquals(cur, db.exactRef("refs/heads/batch1").getObjectId()); |
||||||
|
assertEquals(prev, db.exactRef("refs/heads/batch2").getObjectId()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testFastforwardStatus() throws Exception { |
||||||
|
ObjectId cur = db.resolve("master"); |
||||||
|
ObjectId prev = db.resolve("master^"); |
||||||
|
RefUpdate u = db.updateRef("refs/heads/master"); |
||||||
|
|
||||||
|
u.setNewObjectId(prev); |
||||||
|
u.setForceUpdate(true); |
||||||
|
assertEquals(FORCED, u.update()); |
||||||
|
|
||||||
|
RefUpdate u2 = db.updateRef("refs/heads/master"); |
||||||
|
|
||||||
|
u2.setNewObjectId(cur); |
||||||
|
assertEquals(FAST_FORWARD, u2.update()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testUpdateChecksOldValue() throws Exception { |
||||||
|
ObjectId cur = db.resolve("master"); |
||||||
|
ObjectId prev = db.resolve("master^"); |
||||||
|
RefUpdate u1 = db.updateRef("refs/heads/master"); |
||||||
|
RefUpdate u2 = db.updateRef("refs/heads/master"); |
||||||
|
|
||||||
|
u1.setExpectedOldObjectId(cur); |
||||||
|
u1.setNewObjectId(prev); |
||||||
|
u1.setForceUpdate(true); |
||||||
|
|
||||||
|
u2.setExpectedOldObjectId(cur); |
||||||
|
u2.setNewObjectId(prev); |
||||||
|
u2.setForceUpdate(true); |
||||||
|
|
||||||
|
assertEquals(FORCED, u1.update()); |
||||||
|
assertEquals(LOCK_FAILURE, u2.update()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testWritesymref() throws Exception { |
||||||
|
writeSymref(Constants.HEAD, "refs/heads/a"); |
||||||
|
assertNotNull(db.exactRef("refs/heads/b")); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testFastforwardStatus2() throws Exception { |
||||||
|
writeSymref(Constants.HEAD, "refs/heads/a"); |
||||||
|
ObjectId bId = db.exactRef("refs/heads/b").getObjectId(); |
||||||
|
RefUpdate u = db.updateRef("refs/heads/a"); |
||||||
|
u.setNewObjectId(bId); |
||||||
|
u.setRefLogMessage("Setup", false); |
||||||
|
assertEquals(FAST_FORWARD, u.update()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testDelete() throws Exception { |
||||||
|
RefUpdate up = db.getRefDatabase().newUpdate("refs/heads/a", false); |
||||||
|
up.setForceUpdate(true); |
||||||
|
RefUpdate.Result res = up.delete(); |
||||||
|
assertEquals(res, FORCED); |
||||||
|
assertNull(db.exactRef("refs/heads/a")); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testDeleteWithoutHead() throws IOException { |
||||||
|
// Prepare repository without HEAD
|
||||||
|
RefUpdate refUpdate = db.updateRef(Constants.HEAD, true); |
||||||
|
refUpdate.setForceUpdate(true); |
||||||
|
refUpdate.setNewObjectId(ObjectId.zeroId()); |
||||||
|
|
||||||
|
RefUpdate.Result updateResult = refUpdate.update(); |
||||||
|
assertEquals(FORCED, updateResult); |
||||||
|
|
||||||
|
Ref r = db.exactRef("HEAD"); |
||||||
|
assertEquals(ObjectId.zeroId(), r.getObjectId()); |
||||||
|
RefUpdate.Result deleteHeadResult = db.updateRef(Constants.HEAD) |
||||||
|
.delete(); |
||||||
|
|
||||||
|
// why does doDelete say NEW ?
|
||||||
|
assertEquals(RefUpdate.Result.NO_CHANGE, deleteHeadResult); |
||||||
|
|
||||||
|
// Any result is ok as long as it's not an NPE
|
||||||
|
db.updateRef(Constants.R_HEADS + "master").delete(); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testUpdateRefDetached() throws Exception { |
||||||
|
ObjectId pid = db.resolve("refs/heads/master"); |
||||||
|
ObjectId ppid = db.resolve("refs/heads/master^"); |
||||||
|
RefUpdate updateRef = db.updateRef("HEAD", true); |
||||||
|
updateRef.setForceUpdate(true); |
||||||
|
updateRef.setNewObjectId(ppid); |
||||||
|
RefUpdate.Result update = updateRef.update(); |
||||||
|
assertEquals(FORCED, update); |
||||||
|
assertEquals(ppid, db.resolve("HEAD")); |
||||||
|
Ref ref = db.exactRef("HEAD"); |
||||||
|
assertEquals("HEAD", ref.getName()); |
||||||
|
assertTrue("is detached", !ref.isSymbolic()); |
||||||
|
|
||||||
|
// the branch HEAD referred to is left untouched
|
||||||
|
assertEquals(pid, db.resolve("refs/heads/master")); |
||||||
|
ReflogReader reflogReader = db.getReflogReader("HEAD"); |
||||||
|
ReflogEntry e = reflogReader.getReverseEntries().get(0); |
||||||
|
assertEquals(ppid, e.getNewId()); |
||||||
|
assertEquals("GIT_COMMITTER_EMAIL", e.getWho().getEmailAddress()); |
||||||
|
assertEquals("GIT_COMMITTER_NAME", e.getWho().getName()); |
||||||
|
assertEquals(1250379778000L, e.getWho().getWhen().getTime()); |
||||||
|
assertEquals(pid, e.getOldId()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testWriteReflog() throws Exception { |
||||||
|
ObjectId pid = db.resolve("refs/heads/master^"); |
||||||
|
RefUpdate updateRef = db.updateRef("refs/heads/master"); |
||||||
|
updateRef.setNewObjectId(pid); |
||||||
|
String msg = "REFLOG!"; |
||||||
|
updateRef.setRefLogMessage(msg, true); |
||||||
|
PersonIdent person = new PersonIdent("name", "mail@example.com"); |
||||||
|
updateRef.setRefLogIdent(person); |
||||||
|
updateRef.setForceUpdate(true); |
||||||
|
RefUpdate.Result update = updateRef.update(); |
||||||
|
assertEquals(FORCED, update); // internal
|
||||||
|
ReflogReader r = db.getReflogReader("refs/heads/master"); |
||||||
|
|
||||||
|
ReflogEntry e = r.getLastEntry(); |
||||||
|
assertEquals(e.getNewId(), pid); |
||||||
|
assertEquals(e.getComment(), "REFLOG!: FORCED"); |
||||||
|
assertEquals(e.getWho(), person); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testLooseDelete() throws IOException { |
||||||
|
final String newRef = "refs/heads/abc"; |
||||||
|
assertNull(db.exactRef(newRef)); |
||||||
|
|
||||||
|
RefUpdate ref = db.updateRef(newRef); |
||||||
|
ObjectId nonZero = db.resolve(Constants.HEAD); |
||||||
|
assertNotEquals(nonZero, ObjectId.zeroId()); |
||||||
|
ref.setNewObjectId(nonZero); |
||||||
|
assertEquals(RefUpdate.Result.NEW, ref.update()); |
||||||
|
|
||||||
|
ref = db.updateRef(newRef); |
||||||
|
ref.setNewObjectId(db.resolve(Constants.HEAD)); |
||||||
|
|
||||||
|
assertEquals(ref.delete(), RefUpdate.Result.NO_CHANGE); |
||||||
|
|
||||||
|
// Differs from RefupdateTest. Deleting a loose ref leaves reflog trail.
|
||||||
|
ReflogReader reader = db.getReflogReader("refs/heads/abc"); |
||||||
|
assertEquals(ObjectId.zeroId(), reader.getReverseEntry(1).getOldId()); |
||||||
|
assertEquals(nonZero, reader.getReverseEntry(1).getNewId()); |
||||||
|
assertEquals(nonZero, reader.getReverseEntry(0).getOldId()); |
||||||
|
assertEquals(ObjectId.zeroId(), reader.getReverseEntry(0).getNewId()); |
||||||
|
} |
||||||
|
|
||||||
|
private static class SubclassedId extends ObjectId { |
||||||
|
SubclassedId(AnyObjectId src) { |
||||||
|
super(src); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testNoCacheObjectIdSubclass() throws IOException { |
||||||
|
final String newRef = "refs/heads/abc"; |
||||||
|
final RefUpdate ru = updateRef(newRef); |
||||||
|
final SubclassedId newid = new SubclassedId(ru.getNewObjectId()); |
||||||
|
ru.setNewObjectId(newid); |
||||||
|
RefUpdate.Result update = ru.update(); |
||||||
|
assertEquals(RefUpdate.Result.NEW, update); |
||||||
|
Ref r = db.exactRef(newRef); |
||||||
|
assertEquals(newRef, r.getName()); |
||||||
|
assertNotNull(r.getObjectId()); |
||||||
|
assertNotSame(newid, r.getObjectId()); |
||||||
|
assertSame(ObjectId.class, r.getObjectId().getClass()); |
||||||
|
assertEquals(newid, r.getObjectId()); |
||||||
|
List<ReflogEntry> reverseEntries1 = db.getReflogReader("refs/heads/abc") |
||||||
|
.getReverseEntries(); |
||||||
|
ReflogEntry entry1 = reverseEntries1.get(0); |
||||||
|
assertEquals(1, reverseEntries1.size()); |
||||||
|
assertEquals(ObjectId.zeroId(), entry1.getOldId()); |
||||||
|
assertEquals(r.getObjectId(), entry1.getNewId()); |
||||||
|
|
||||||
|
assertEquals(new PersonIdent(db).toString(), |
||||||
|
entry1.getWho().toString()); |
||||||
|
assertEquals("", entry1.getComment()); |
||||||
|
List<ReflogEntry> reverseEntries2 = db.getReflogReader("HEAD") |
||||||
|
.getReverseEntries(); |
||||||
|
assertEquals(0, reverseEntries2.size()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testDeleteSymref() throws IOException { |
||||||
|
RefUpdate dst = updateRef("refs/heads/abc"); |
||||||
|
assertEquals(RefUpdate.Result.NEW, dst.update()); |
||||||
|
ObjectId id = dst.getNewObjectId(); |
||||||
|
|
||||||
|
RefUpdate u = db.updateRef("refs/symref"); |
||||||
|
assertEquals(RefUpdate.Result.NEW, u.link(dst.getName())); |
||||||
|
|
||||||
|
Ref ref = db.exactRef(u.getName()); |
||||||
|
assertNotNull(ref); |
||||||
|
assertTrue(ref.isSymbolic()); |
||||||
|
assertEquals(dst.getName(), ref.getLeaf().getName()); |
||||||
|
assertEquals(id, ref.getLeaf().getObjectId()); |
||||||
|
|
||||||
|
u = db.updateRef(u.getName()); |
||||||
|
u.setDetachingSymbolicRef(); |
||||||
|
u.setForceUpdate(true); |
||||||
|
assertEquals(FORCED, u.delete()); |
||||||
|
|
||||||
|
assertNull(db.exactRef(u.getName())); |
||||||
|
ref = db.exactRef(dst.getName()); |
||||||
|
assertNotNull(ref); |
||||||
|
assertFalse(ref.isSymbolic()); |
||||||
|
assertEquals(id, ref.getObjectId()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void writeUnbornHead() throws Exception { |
||||||
|
RefUpdate.Result r = db.updateRef("HEAD").link("refs/heads/unborn"); |
||||||
|
assertEquals(FORCED, r); |
||||||
|
|
||||||
|
Ref head = db.exactRef("HEAD"); |
||||||
|
assertTrue(head.isSymbolic()); |
||||||
|
assertEquals(head.getTarget().getName(), "refs/heads/unborn"); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Update the HEAD ref when the referenced branch is unborn |
||||||
|
* |
||||||
|
* @throws Exception |
||||||
|
*/ |
||||||
|
@Test |
||||||
|
public void testUpdateRefDetachedUnbornHead() throws Exception { |
||||||
|
ObjectId ppid = db.resolve("refs/heads/master^"); |
||||||
|
writeSymref("HEAD", "refs/heads/unborn"); |
||||||
|
RefUpdate updateRef = db.updateRef("HEAD", true); |
||||||
|
updateRef.setForceUpdate(true); |
||||||
|
updateRef.setNewObjectId(ppid); |
||||||
|
RefUpdate.Result update = updateRef.update(); |
||||||
|
assertEquals(RefUpdate.Result.NEW, update); |
||||||
|
assertEquals(ppid, db.resolve("HEAD")); |
||||||
|
Ref ref = db.exactRef("HEAD"); |
||||||
|
assertEquals("HEAD", ref.getName()); |
||||||
|
assertTrue("is detached", !ref.isSymbolic()); |
||||||
|
|
||||||
|
// the branch HEAD referred to is left untouched
|
||||||
|
assertNull(db.resolve("refs/heads/unborn")); |
||||||
|
ReflogReader reflogReader = db.getReflogReader("HEAD"); |
||||||
|
ReflogEntry e = reflogReader.getReverseEntries().get(0); |
||||||
|
assertEquals(ObjectId.zeroId(), e.getOldId()); |
||||||
|
assertEquals(ppid, e.getNewId()); |
||||||
|
assertEquals("GIT_COMMITTER_EMAIL", e.getWho().getEmailAddress()); |
||||||
|
assertEquals("GIT_COMMITTER_NAME", e.getWho().getName()); |
||||||
|
assertEquals(1250379778000L, e.getWho().getWhen().getTime()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testDeleteNotFound() throws IOException { |
||||||
|
RefUpdate ref = updateRef("refs/heads/doesnotexist"); |
||||||
|
assertNull(db.exactRef(ref.getName())); |
||||||
|
assertEquals(RefUpdate.Result.NEW, ref.delete()); |
||||||
|
assertNull(db.exactRef(ref.getName())); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testRenameSymref() throws IOException { |
||||||
|
db.resolve("HEAD"); |
||||||
|
RefRename r = db.renameRef("HEAD", "KOPF"); |
||||||
|
assertEquals(IO_FAILURE, r.rename()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testRenameCurrentBranch() throws IOException { |
||||||
|
ObjectId rb = db.resolve("refs/heads/b"); |
||||||
|
writeSymref(Constants.HEAD, "refs/heads/b"); |
||||||
|
ObjectId oldHead = db.resolve(Constants.HEAD); |
||||||
|
assertEquals("internal test condition, b == HEAD", oldHead, rb); |
||||||
|
RefRename renameRef = db.renameRef("refs/heads/b", |
||||||
|
"refs/heads/new/name"); |
||||||
|
RefUpdate.Result result = renameRef.rename(); |
||||||
|
assertEquals(RefUpdate.Result.RENAMED, result); |
||||||
|
assertEquals(rb, db.resolve("refs/heads/new/name")); |
||||||
|
assertNull(db.resolve("refs/heads/b")); |
||||||
|
assertEquals(rb, db.resolve(Constants.HEAD)); |
||||||
|
|
||||||
|
List<String> names = new ArrayList<>(); |
||||||
|
names.add("HEAD"); |
||||||
|
names.add("refs/heads/b"); |
||||||
|
names.add("refs/heads/new/name"); |
||||||
|
|
||||||
|
for (String nm : names) { |
||||||
|
ReflogReader rd = db.getReflogReader(nm); |
||||||
|
assertNotNull(rd); |
||||||
|
ReflogEntry last = rd.getLastEntry(); |
||||||
|
ObjectId id = last.getNewId(); |
||||||
|
assertTrue(ObjectId.zeroId().equals(id) || rb.equals(id)); |
||||||
|
|
||||||
|
id = last.getNewId(); |
||||||
|
assertTrue(ObjectId.zeroId().equals(id) || rb.equals(id)); |
||||||
|
|
||||||
|
String want = "Branch: renamed b to new/name"; |
||||||
|
assertEquals(want, last.getComment()); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testRenameDestExists() throws IOException { |
||||||
|
ObjectId rb = db.resolve("refs/heads/b"); |
||||||
|
writeSymref(Constants.HEAD, "refs/heads/b"); |
||||||
|
ObjectId oldHead = db.resolve(Constants.HEAD); |
||||||
|
assertEquals("internal test condition, b == HEAD", oldHead, rb); |
||||||
|
RefRename renameRef = db.renameRef("refs/heads/b", "refs/heads/a"); |
||||||
|
RefUpdate.Result result = renameRef.rename(); |
||||||
|
assertEquals(RefUpdate.Result.LOCK_FAILURE, result); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testRenameAtomic() throws IOException { |
||||||
|
ObjectId prevId = db.resolve("refs/heads/master^"); |
||||||
|
|
||||||
|
RefRename rename = db.renameRef("refs/heads/master", |
||||||
|
"refs/heads/newmaster"); |
||||||
|
|
||||||
|
RefUpdate updateRef = db.updateRef("refs/heads/master"); |
||||||
|
updateRef.setNewObjectId(prevId); |
||||||
|
updateRef.setForceUpdate(true); |
||||||
|
assertEquals(FORCED, updateRef.update()); |
||||||
|
assertEquals(RefUpdate.Result.LOCK_FAILURE, rename.rename()); |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void reftableRefsStorageClass() throws IOException { |
||||||
|
Ref b = db.exactRef("refs/heads/b"); |
||||||
|
assertEquals(Ref.Storage.PACKED, b.getStorage()); |
||||||
|
} |
||||||
|
|
||||||
|
private RefUpdate updateRef(String name) throws IOException { |
||||||
|
final RefUpdate ref = db.updateRef(name); |
||||||
|
ref.setNewObjectId(db.resolve(Constants.HEAD)); |
||||||
|
return ref; |
||||||
|
} |
||||||
|
|
||||||
|
private void writeSymref(String src, String dst) throws IOException { |
||||||
|
RefUpdate u = db.updateRef(src); |
||||||
|
switch (u.link(dst)) { |
||||||
|
case NEW: |
||||||
|
case FORCED: |
||||||
|
case NO_CHANGE: |
||||||
|
break; |
||||||
|
default: |
||||||
|
fail("link " + src + " to " + dst); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,651 @@ |
|||||||
|
/* |
||||||
|
* Copyright (C) 2019 Google LLC |
||||||
|
* and other copyright owners as documented in the project's IP log. |
||||||
|
* |
||||||
|
* This program and the accompanying materials are made available |
||||||
|
* under the terms of the Eclipse Distribution License v1.0 which |
||||||
|
* accompanies this distribution, is reproduced below, and is |
||||||
|
* available at http://www.eclipse.org/org/documents/edl-v10.php
|
||||||
|
* |
||||||
|
* All rights reserved. |
||||||
|
* |
||||||
|
* Redistribution and use in source and binary forms, with or |
||||||
|
* without modification, are permitted provided that the following |
||||||
|
* conditions are met: |
||||||
|
* |
||||||
|
* - Redistributions of source code must retain the above copyright |
||||||
|
* notice, this list of conditions and the following disclaimer. |
||||||
|
* |
||||||
|
* - Redistributions in binary form must reproduce the above |
||||||
|
* copyright notice, this list of conditions and the following |
||||||
|
* disclaimer in the documentation and/or other materials provided |
||||||
|
* with the distribution. |
||||||
|
* |
||||||
|
* - Neither the name of the Eclipse Foundation, Inc. nor the |
||||||
|
* names of its contributors may be used to endorse or promote |
||||||
|
* products derived from this software without specific prior |
||||||
|
* written permission. |
||||||
|
* |
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND |
||||||
|
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, |
||||||
|
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
||||||
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||||||
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR |
||||||
|
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
||||||
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
||||||
|
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
||||||
|
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||||||
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF |
||||||
|
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||||||
|
*/ |
||||||
|
|
||||||
|
package org.eclipse.jgit.internal.storage.file; |
||||||
|
|
||||||
|
import static org.eclipse.jgit.lib.Ref.UNDEFINED_UPDATE_INDEX; |
||||||
|
import static org.eclipse.jgit.lib.Ref.Storage.NEW; |
||||||
|
import static org.eclipse.jgit.lib.Ref.Storage.PACKED; |
||||||
|
|
||||||
|
import java.io.File; |
||||||
|
import java.io.IOException; |
||||||
|
import java.util.ArrayList; |
||||||
|
import java.util.Collections; |
||||||
|
import java.util.HashSet; |
||||||
|
import java.util.List; |
||||||
|
import java.util.Map; |
||||||
|
import java.util.TreeSet; |
||||||
|
import java.util.concurrent.locks.ReentrantLock; |
||||||
|
import java.util.stream.Collectors; |
||||||
|
|
||||||
|
import org.eclipse.jgit.annotations.NonNull; |
||||||
|
import org.eclipse.jgit.events.RefsChangedEvent; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.MergedReftable; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.ReftableBatchRefUpdate; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.ReftableDatabase; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.ReftableWriter; |
||||||
|
import org.eclipse.jgit.lib.BatchRefUpdate; |
||||||
|
import org.eclipse.jgit.lib.Constants; |
||||||
|
import org.eclipse.jgit.lib.ObjectId; |
||||||
|
import org.eclipse.jgit.lib.ObjectIdRef; |
||||||
|
import org.eclipse.jgit.lib.PersonIdent; |
||||||
|
import org.eclipse.jgit.lib.Ref; |
||||||
|
import org.eclipse.jgit.lib.RefDatabase; |
||||||
|
import org.eclipse.jgit.lib.RefRename; |
||||||
|
import org.eclipse.jgit.lib.RefUpdate; |
||||||
|
import org.eclipse.jgit.lib.ReflogEntry; |
||||||
|
import org.eclipse.jgit.lib.ReflogReader; |
||||||
|
import org.eclipse.jgit.lib.Repository; |
||||||
|
import org.eclipse.jgit.lib.SymbolicRef; |
||||||
|
import org.eclipse.jgit.revwalk.RevObject; |
||||||
|
import org.eclipse.jgit.revwalk.RevTag; |
||||||
|
import org.eclipse.jgit.revwalk.RevWalk; |
||||||
|
import org.eclipse.jgit.transport.ReceiveCommand; |
||||||
|
import org.eclipse.jgit.util.FileUtils; |
||||||
|
import org.eclipse.jgit.util.RefList; |
||||||
|
import org.eclipse.jgit.util.RefMap; |
||||||
|
|
||||||
|
/** |
||||||
|
* Implements RefDatabase using reftable for storage. |
||||||
|
* |
||||||
|
* This class is threadsafe. |
||||||
|
*/ |
||||||
|
public class FileReftableDatabase extends RefDatabase { |
||||||
|
private final ReftableDatabase reftableDatabase; |
||||||
|
|
||||||
|
private final FileRepository fileRepository; |
||||||
|
|
||||||
|
private final FileReftableStack reftableStack; |
||||||
|
|
||||||
|
FileReftableDatabase(FileRepository repo, File refstackName) throws IOException { |
||||||
|
this.fileRepository = repo; |
||||||
|
this.reftableStack = new FileReftableStack(refstackName, |
||||||
|
new File(fileRepository.getDirectory(), Constants.REFTABLE), |
||||||
|
() -> fileRepository.fireEvent(new RefsChangedEvent()), |
||||||
|
() -> fileRepository.getConfig()); |
||||||
|
this.reftableDatabase = new ReftableDatabase() { |
||||||
|
|
||||||
|
@Override |
||||||
|
public MergedReftable openMergedReftable() throws IOException { |
||||||
|
return reftableStack.getMergedReftable(); |
||||||
|
} |
||||||
|
}; |
||||||
|
} |
||||||
|
|
||||||
|
ReflogReader getReflogReader(String refname) throws IOException { |
||||||
|
return reftableDatabase.getReflogReader(refname); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* @param repoDir |
||||||
|
* @return whether the given repo uses reftable for refdb storage. |
||||||
|
*/ |
||||||
|
public static boolean isReftable(File repoDir) { |
||||||
|
return new File(repoDir, "refs").isFile() //$NON-NLS-1$
|
||||||
|
&& new File(repoDir, Constants.REFTABLE).isDirectory(); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Runs a full compaction for GC purposes. |
||||||
|
* @throws IOException on I/O errors |
||||||
|
*/ |
||||||
|
public void compactFully() throws IOException { |
||||||
|
reftableDatabase.getLock().lock(); |
||||||
|
try { |
||||||
|
reftableStack.compactFully(); |
||||||
|
} finally { |
||||||
|
reftableDatabase.getLock().unlock(); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
private ReentrantLock getLock() { |
||||||
|
return reftableDatabase.getLock(); |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public boolean performsAtomicTransactions() { |
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@NonNull |
||||||
|
@Override |
||||||
|
public BatchRefUpdate newBatchUpdate() { |
||||||
|
return new FileReftableBatchRefUpdate(this, fileRepository); |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public RefUpdate newUpdate(String refName, boolean detach) |
||||||
|
throws IOException { |
||||||
|
boolean detachingSymbolicRef = false; |
||||||
|
Ref ref = exactRef(refName); |
||||||
|
|
||||||
|
if (ref == null) { |
||||||
|
ref = new ObjectIdRef.Unpeeled(NEW, refName, null); |
||||||
|
} else { |
||||||
|
detachingSymbolicRef = detach && ref.isSymbolic(); |
||||||
|
} |
||||||
|
|
||||||
|
RefUpdate update = new FileReftableRefUpdate(ref); |
||||||
|
if (detachingSymbolicRef) { |
||||||
|
update.setDetachingSymbolicRef(); |
||||||
|
} |
||||||
|
return update; |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public Ref exactRef(String name) throws IOException { |
||||||
|
return reftableDatabase.exactRef(name); |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public List<Ref> getRefs() throws IOException { |
||||||
|
return super.getRefs(); |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public Map<String, Ref> getRefs(String prefix) throws IOException { |
||||||
|
List<Ref> refs = reftableDatabase.getRefsByPrefix(prefix); |
||||||
|
RefList.Builder<Ref> builder = new RefList.Builder<>(refs.size()); |
||||||
|
for (Ref r : refs) { |
||||||
|
builder.add(r); |
||||||
|
} |
||||||
|
return new RefMap(prefix, builder.toRefList(), RefList.emptyList(), |
||||||
|
RefList.emptyList()); |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public List<Ref> getAdditionalRefs() throws IOException { |
||||||
|
return Collections.emptyList(); |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public Ref peel(Ref ref) throws IOException { |
||||||
|
Ref oldLeaf = ref.getLeaf(); |
||||||
|
if (oldLeaf.isPeeled() || oldLeaf.getObjectId() == null) { |
||||||
|
return ref; |
||||||
|
} |
||||||
|
return recreate(ref, doPeel(oldLeaf), hasVersioning()); |
||||||
|
|
||||||
|
} |
||||||
|
|
||||||
|
private Ref doPeel(Ref leaf) throws IOException { |
||||||
|
try (RevWalk rw = new RevWalk(fileRepository)) { |
||||||
|
RevObject obj = rw.parseAny(leaf.getObjectId()); |
||||||
|
if (obj instanceof RevTag) { |
||||||
|
return new ObjectIdRef.PeeledTag(leaf.getStorage(), |
||||||
|
leaf.getName(), leaf.getObjectId(), rw.peel(obj).copy(), |
||||||
|
hasVersioning() ? leaf.getUpdateIndex() |
||||||
|
: UNDEFINED_UPDATE_INDEX); |
||||||
|
} |
||||||
|
return new ObjectIdRef.PeeledNonTag(leaf.getStorage(), |
||||||
|
leaf.getName(), leaf.getObjectId(), |
||||||
|
hasVersioning() ? leaf.getUpdateIndex() |
||||||
|
: UNDEFINED_UPDATE_INDEX); |
||||||
|
|
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
private static Ref recreate(Ref old, Ref leaf, boolean hasVersioning) { |
||||||
|
if (old.isSymbolic()) { |
||||||
|
Ref dst = recreate(old.getTarget(), leaf, hasVersioning); |
||||||
|
return new SymbolicRef(old.getName(), dst, |
||||||
|
hasVersioning ? old.getUpdateIndex() |
||||||
|
: UNDEFINED_UPDATE_INDEX); |
||||||
|
} |
||||||
|
return leaf; |
||||||
|
} |
||||||
|
|
||||||
|
private class FileRefRename extends RefRename { |
||||||
|
FileRefRename(RefUpdate src, RefUpdate dst) { |
||||||
|
super(src, dst); |
||||||
|
} |
||||||
|
|
||||||
|
void writeRename(ReftableWriter w) throws IOException { |
||||||
|
long idx = reftableDatabase.nextUpdateIndex(); |
||||||
|
w.setMinUpdateIndex(idx).setMaxUpdateIndex(idx).begin(); |
||||||
|
List<Ref> refs = new ArrayList<>(3); |
||||||
|
|
||||||
|
Ref dest = destination.getRef(); |
||||||
|
Ref head = exactRef(Constants.HEAD); |
||||||
|
if (head != null && head.isSymbolic() |
||||||
|
&& head.getLeaf().getName().equals(source.getName())) { |
||||||
|
head = new SymbolicRef(Constants.HEAD, dest, idx); |
||||||
|
refs.add(head); |
||||||
|
} |
||||||
|
|
||||||
|
ObjectId objId = source.getRef().getObjectId(); |
||||||
|
|
||||||
|
// XXX should we check if the source is a Tag vs. NonTag?
|
||||||
|
refs.add(new ObjectIdRef.PeeledNonTag(Ref.Storage.NEW, |
||||||
|
destination.getName(), objId)); |
||||||
|
refs.add(new ObjectIdRef.Unpeeled(Ref.Storage.NEW, source.getName(), |
||||||
|
null)); |
||||||
|
|
||||||
|
w.sortAndWriteRefs(refs); |
||||||
|
PersonIdent who = destination.getRefLogIdent(); |
||||||
|
if (who == null) { |
||||||
|
who = new PersonIdent(fileRepository); |
||||||
|
} |
||||||
|
|
||||||
|
if (!destination.getRefLogMessage().isEmpty()) { |
||||||
|
List<String> refnames = refs.stream().map(r -> r.getName()) |
||||||
|
.collect(Collectors.toList()); |
||||||
|
Collections.sort(refnames); |
||||||
|
for (String s : refnames) { |
||||||
|
ObjectId old = (Constants.HEAD.equals(s) |
||||||
|
|| s.equals(source.getName())) ? objId |
||||||
|
: ObjectId.zeroId(); |
||||||
|
ObjectId newId = (Constants.HEAD.equals(s) |
||||||
|
|| s.equals(destination.getName())) ? objId |
||||||
|
: ObjectId.zeroId(); |
||||||
|
|
||||||
|
w.writeLog(s, idx, who, old, newId, |
||||||
|
destination.getRefLogMessage()); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
protected RefUpdate.Result doRename() throws IOException { |
||||||
|
Ref src = exactRef(source.getName()); |
||||||
|
if (exactRef(destination.getName()) != null || src == null |
||||||
|
|| !source.getOldObjectId().equals(src.getObjectId())) { |
||||||
|
return RefUpdate.Result.LOCK_FAILURE; |
||||||
|
} |
||||||
|
|
||||||
|
if (src.isSymbolic()) { |
||||||
|
// We could support this, but this is easier and compatible.
|
||||||
|
return RefUpdate.Result.IO_FAILURE; |
||||||
|
} |
||||||
|
|
||||||
|
if (!addReftable(this::writeRename)) { |
||||||
|
return RefUpdate.Result.LOCK_FAILURE; |
||||||
|
} |
||||||
|
|
||||||
|
return RefUpdate.Result.RENAMED; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public RefRename newRename(String fromName, String toName) |
||||||
|
throws IOException { |
||||||
|
RefUpdate src = newUpdate(fromName, true); |
||||||
|
RefUpdate dst = newUpdate(toName, true); |
||||||
|
return new FileRefRename(src, dst); |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public boolean isNameConflicting(String name) throws IOException { |
||||||
|
return reftableDatabase.isNameConflicting(name, new TreeSet<>(), |
||||||
|
new HashSet<>()); |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public void close() { |
||||||
|
reftableStack.close(); |
||||||
|
} |
||||||
|
|
||||||
|
/** {@inheritDoc} */ |
||||||
|
@Override |
||||||
|
public void create() throws IOException { |
||||||
|
FileUtils.mkdir( |
||||||
|
new File(fileRepository.getDirectory(), Constants.REFTABLE), |
||||||
|
true); |
||||||
|
} |
||||||
|
|
||||||
|
private boolean addReftable(FileReftableStack.Writer w) throws IOException { |
||||||
|
if (!reftableStack.addReftable(w)) { |
||||||
|
reftableStack.reload(); |
||||||
|
reftableDatabase.clearCache(); |
||||||
|
return false; |
||||||
|
} |
||||||
|
reftableDatabase.clearCache(); |
||||||
|
|
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
private class FileReftableBatchRefUpdate extends ReftableBatchRefUpdate { |
||||||
|
FileReftableBatchRefUpdate(FileReftableDatabase db, |
||||||
|
Repository repository) { |
||||||
|
super(db, db.reftableDatabase, db.getLock(), repository); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
protected void applyUpdates(List<Ref> newRefs, |
||||||
|
List<ReceiveCommand> pending) throws IOException { |
||||||
|
if (!addReftable(rw -> write(rw, newRefs, pending))) { |
||||||
|
for (ReceiveCommand c : pending) { |
||||||
|
if (c.getResult() == ReceiveCommand.Result.NOT_ATTEMPTED) { |
||||||
|
c.setResult(RefUpdate.Result.LOCK_FAILURE); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
private class FileReftableRefUpdate extends RefUpdate { |
||||||
|
FileReftableRefUpdate(Ref ref) { |
||||||
|
super(ref); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
protected RefDatabase getRefDatabase() { |
||||||
|
return FileReftableDatabase.this; |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
protected Repository getRepository() { |
||||||
|
return FileReftableDatabase.this.fileRepository; |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
protected void unlock() { |
||||||
|
// nop.
|
||||||
|
} |
||||||
|
|
||||||
|
private RevWalk rw; |
||||||
|
|
||||||
|
private Ref dstRef; |
||||||
|
|
||||||
|
@Override |
||||||
|
public Result update(RevWalk walk) throws IOException { |
||||||
|
try { |
||||||
|
rw = walk; |
||||||
|
return super.update(walk); |
||||||
|
} finally { |
||||||
|
rw = null; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
protected boolean tryLock(boolean deref) throws IOException { |
||||||
|
dstRef = getRef(); |
||||||
|
if (deref) { |
||||||
|
dstRef = dstRef.getLeaf(); |
||||||
|
} |
||||||
|
|
||||||
|
Ref derefed = exactRef(dstRef.getName()); |
||||||
|
if (derefed != null) { |
||||||
|
setOldObjectId(derefed.getObjectId()); |
||||||
|
} |
||||||
|
|
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
void writeUpdate(ReftableWriter w) throws IOException { |
||||||
|
Ref newRef = null; |
||||||
|
if (rw != null && !ObjectId.zeroId().equals(getNewObjectId())) { |
||||||
|
RevObject obj = rw.parseAny(getNewObjectId()); |
||||||
|
if (obj instanceof RevTag) { |
||||||
|
newRef = new ObjectIdRef.PeeledTag(Ref.Storage.PACKED, |
||||||
|
dstRef.getName(), getNewObjectId(), |
||||||
|
rw.peel(obj).copy()); |
||||||
|
} |
||||||
|
} |
||||||
|
if (newRef == null) { |
||||||
|
newRef = new ObjectIdRef.PeeledNonTag(Ref.Storage.PACKED, |
||||||
|
dstRef.getName(), getNewObjectId()); |
||||||
|
} |
||||||
|
|
||||||
|
long idx = reftableDatabase.nextUpdateIndex(); |
||||||
|
w.setMinUpdateIndex(idx).setMaxUpdateIndex(idx).begin() |
||||||
|
.writeRef(newRef); |
||||||
|
|
||||||
|
ObjectId oldId = getOldObjectId(); |
||||||
|
if (oldId == null) { |
||||||
|
oldId = ObjectId.zeroId(); |
||||||
|
} |
||||||
|
w.writeLog(dstRef.getName(), idx, getRefLogIdent(), oldId, |
||||||
|
getNewObjectId(), getRefLogMessage()); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public PersonIdent getRefLogIdent() { |
||||||
|
PersonIdent who = super.getRefLogIdent(); |
||||||
|
if (who == null) { |
||||||
|
who = new PersonIdent(getRepository()); |
||||||
|
} |
||||||
|
return who; |
||||||
|
} |
||||||
|
|
||||||
|
void writeDelete(ReftableWriter w) throws IOException { |
||||||
|
Ref newRef = new ObjectIdRef.Unpeeled(Ref.Storage.NEW, |
||||||
|
dstRef.getName(), null); |
||||||
|
long idx = reftableDatabase.nextUpdateIndex(); |
||||||
|
w.setMinUpdateIndex(idx).setMaxUpdateIndex(idx).begin() |
||||||
|
.writeRef(newRef); |
||||||
|
|
||||||
|
ObjectId oldId = ObjectId.zeroId(); |
||||||
|
Ref old = exactRef(dstRef.getName()); |
||||||
|
if (old != null) { |
||||||
|
old = old.getLeaf(); |
||||||
|
if (old.getObjectId() != null) { |
||||||
|
oldId = old.getObjectId(); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
w.writeLog(dstRef.getName(), idx, getRefLogIdent(), oldId, |
||||||
|
ObjectId.zeroId(), getRefLogMessage()); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
protected Result doUpdate(Result desiredResult) throws IOException { |
||||||
|
if (isRefLogIncludingResult()) { |
||||||
|
setRefLogMessage( |
||||||
|
getRefLogMessage() + ": " + desiredResult.toString(), //$NON-NLS-1$
|
||||||
|
false); |
||||||
|
} |
||||||
|
|
||||||
|
if (!addReftable(this::writeUpdate)) { |
||||||
|
return Result.LOCK_FAILURE; |
||||||
|
} |
||||||
|
|
||||||
|
return desiredResult; |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
protected Result doDelete(Result desiredResult) throws IOException { |
||||||
|
|
||||||
|
if (isRefLogIncludingResult()) { |
||||||
|
setRefLogMessage( |
||||||
|
getRefLogMessage() + ": " + desiredResult.toString(), //$NON-NLS-1$
|
||||||
|
false); |
||||||
|
} |
||||||
|
|
||||||
|
if (!addReftable(this::writeDelete)) { |
||||||
|
return Result.LOCK_FAILURE; |
||||||
|
} |
||||||
|
|
||||||
|
return desiredResult; |
||||||
|
} |
||||||
|
|
||||||
|
void writeLink(ReftableWriter w) throws IOException { |
||||||
|
long idx = reftableDatabase.nextUpdateIndex(); |
||||||
|
w.setMinUpdateIndex(idx).setMaxUpdateIndex(idx).begin() |
||||||
|
.writeRef(dstRef); |
||||||
|
|
||||||
|
ObjectId beforeId = ObjectId.zeroId(); |
||||||
|
Ref before = exactRef(dstRef.getName()); |
||||||
|
if (before != null) { |
||||||
|
before = before.getLeaf(); |
||||||
|
if (before.getObjectId() != null) { |
||||||
|
beforeId = before.getObjectId(); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
Ref after = dstRef.getLeaf(); |
||||||
|
ObjectId afterId = ObjectId.zeroId(); |
||||||
|
if (after.getObjectId() != null) { |
||||||
|
afterId = after.getObjectId(); |
||||||
|
} |
||||||
|
|
||||||
|
w.writeLog(dstRef.getName(), idx, getRefLogIdent(), beforeId, |
||||||
|
afterId, getRefLogMessage()); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
protected Result doLink(String target) throws IOException { |
||||||
|
if (isRefLogIncludingResult()) { |
||||||
|
setRefLogMessage( |
||||||
|
getRefLogMessage() + ": " + Result.FORCED.toString(), //$NON-NLS-1$
|
||||||
|
false); |
||||||
|
} |
||||||
|
|
||||||
|
boolean exists = exactRef(getName()) != null; |
||||||
|
dstRef = new SymbolicRef(getName(), |
||||||
|
new ObjectIdRef.Unpeeled(Ref.Storage.NEW, target, null), |
||||||
|
reftableDatabase.nextUpdateIndex()); |
||||||
|
|
||||||
|
if (!addReftable(this::writeLink)) { |
||||||
|
return Result.LOCK_FAILURE; |
||||||
|
} |
||||||
|
// XXX unclear if we should support FORCED here. Baseclass says
|
||||||
|
// NEW is OK ?
|
||||||
|
return exists ? Result.FORCED : Result.NEW; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
private static void writeConvertTable(Repository repo, ReftableWriter w, |
||||||
|
boolean writeLogs) throws IOException { |
||||||
|
int size = 0; |
||||||
|
List<Ref> refs = repo.getRefDatabase().getRefs(); |
||||||
|
if (writeLogs) { |
||||||
|
for (Ref r : refs) { |
||||||
|
ReflogReader rlr = repo.getReflogReader(r.getName()); |
||||||
|
if (rlr != null) { |
||||||
|
size = Math.max(rlr.getReverseEntries().size(), size); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
// We must use 1 here, nextUpdateIndex() on the empty stack is 1.
|
||||||
|
w.setMinUpdateIndex(1).setMaxUpdateIndex(size + 1).begin(); |
||||||
|
|
||||||
|
// The spec says to write the logs in the first table, and put refs in a
|
||||||
|
// separate table, but this complicates the compaction (when we can we drop
|
||||||
|
// deletions? Can we compact the .log table and the .ref table together?)
|
||||||
|
try (RevWalk rw = new RevWalk(repo)) { |
||||||
|
List<Ref> toWrite = new ArrayList<>(refs.size()); |
||||||
|
for (Ref r : refs) { |
||||||
|
toWrite.add(refForWrite(rw, r)); |
||||||
|
} |
||||||
|
w.sortAndWriteRefs(toWrite); |
||||||
|
} |
||||||
|
|
||||||
|
if (writeLogs) { |
||||||
|
for (Ref r : refs) { |
||||||
|
long idx = size; |
||||||
|
ReflogReader reader = repo.getReflogReader(r.getName()); |
||||||
|
if (reader == null) { |
||||||
|
continue; |
||||||
|
} |
||||||
|
for (ReflogEntry e : reader.getReverseEntries()) { |
||||||
|
w.writeLog(r.getName(), idx, e.getWho(), e.getOldId(), |
||||||
|
e.getNewId(), e.getComment()); |
||||||
|
idx--; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
private static Ref refForWrite(RevWalk rw, Ref r) throws IOException { |
||||||
|
if (r.isSymbolic()) { |
||||||
|
return new SymbolicRef(r.getName(), new ObjectIdRef.Unpeeled(NEW, |
||||||
|
r.getTarget().getName(), null)); |
||||||
|
} |
||||||
|
ObjectId newId = r.getObjectId(); |
||||||
|
RevObject obj = rw.parseAny(newId); |
||||||
|
RevObject peel = null; |
||||||
|
if (obj instanceof RevTag) { |
||||||
|
peel = rw.peel(obj); |
||||||
|
} |
||||||
|
if (peel != null) { |
||||||
|
return new ObjectIdRef.PeeledTag(PACKED, r.getName(), newId, |
||||||
|
peel.copy()); |
||||||
|
} |
||||||
|
return new ObjectIdRef.PeeledNonTag(PACKED, r.getName(), newId); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* @param repo |
||||||
|
* the repository |
||||||
|
* @param refstackName |
||||||
|
* the filename for the stack |
||||||
|
* @param writeLogs |
||||||
|
* whether to write reflogs |
||||||
|
* @return a reftable based RefDB from an existing repository. |
||||||
|
* @throws IOException |
||||||
|
* on IO error |
||||||
|
*/ |
||||||
|
public static FileReftableDatabase convertFrom(FileRepository repo, |
||||||
|
File refstackName, boolean writeLogs) throws IOException { |
||||||
|
FileReftableDatabase newDb = null; |
||||||
|
try { |
||||||
|
File reftableDir = new File(repo.getDirectory(), Constants.REFTABLE); |
||||||
|
if (!reftableDir.isDirectory()) { |
||||||
|
reftableDir.mkdir(); |
||||||
|
} |
||||||
|
|
||||||
|
try (FileReftableStack stack = new FileReftableStack(refstackName, |
||||||
|
reftableDir, null, () -> repo.getConfig())) { |
||||||
|
stack.addReftable(rw -> writeConvertTable(repo, rw, writeLogs)); |
||||||
|
} |
||||||
|
refstackName = null; |
||||||
|
} finally { |
||||||
|
if (refstackName != null) { |
||||||
|
refstackName.delete(); |
||||||
|
} |
||||||
|
} |
||||||
|
return newDb; |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,768 @@ |
|||||||
|
/* |
||||||
|
* Copyright (C) 2019 Google LLC |
||||||
|
* and other copyright owners as documented in the project's IP log. |
||||||
|
* |
||||||
|
* This program and the accompanying materials are made available |
||||||
|
* under the terms of the Eclipse Distribution License v1.0 which |
||||||
|
* accompanies this distribution, is reproduced below, and is |
||||||
|
* available at http://www.eclipse.org/org/documents/edl-v10.php
|
||||||
|
* |
||||||
|
* All rights reserved. |
||||||
|
* |
||||||
|
* Redistribution and use in source and binary forms, with or |
||||||
|
* without modification, are permitted provided that the following |
||||||
|
* conditions are met: |
||||||
|
* |
||||||
|
* - Redistributions of source code must retain the above copyright |
||||||
|
* notice, this list of conditions and the following disclaimer. |
||||||
|
* |
||||||
|
* - Redistributions in binary form must reproduce the above |
||||||
|
* copyright notice, this list of conditions and the following |
||||||
|
* disclaimer in the documentation and/or other materials provided |
||||||
|
* with the distribution. |
||||||
|
* |
||||||
|
* - Neither the name of the Eclipse Foundation, Inc. nor the |
||||||
|
* names of its contributors may be used to endorse or promote |
||||||
|
* products derived from this software without specific prior |
||||||
|
* written permission. |
||||||
|
* |
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND |
||||||
|
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, |
||||||
|
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
||||||
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||||||
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR |
||||||
|
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
||||||
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
||||||
|
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
||||||
|
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||||||
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF |
||||||
|
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||||||
|
*/ |
||||||
|
|
||||||
|
package org.eclipse.jgit.internal.storage.file; |
||||||
|
|
||||||
|
import static java.nio.charset.StandardCharsets.UTF_8; |
||||||
|
|
||||||
|
import java.io.BufferedReader; |
||||||
|
import java.io.File; |
||||||
|
import java.io.FileInputStream; |
||||||
|
import java.io.FileNotFoundException; |
||||||
|
import java.io.FileOutputStream; |
||||||
|
import java.io.IOException; |
||||||
|
import java.io.InputStreamReader; |
||||||
|
import java.nio.file.Files; |
||||||
|
import java.nio.file.StandardCopyOption; |
||||||
|
import java.util.ArrayList; |
||||||
|
import java.util.Comparator; |
||||||
|
import java.util.List; |
||||||
|
import java.util.Map; |
||||||
|
import java.util.Optional; |
||||||
|
import java.util.function.Supplier; |
||||||
|
import java.util.stream.Collectors; |
||||||
|
|
||||||
|
import org.eclipse.jgit.annotations.Nullable; |
||||||
|
import org.eclipse.jgit.errors.LockFailedException; |
||||||
|
import org.eclipse.jgit.internal.storage.io.BlockSource; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.MergedReftable; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.ReftableCompactor; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.ReftableConfig; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.ReftableReader; |
||||||
|
import org.eclipse.jgit.internal.storage.reftable.ReftableWriter; |
||||||
|
import org.eclipse.jgit.lib.Config; |
||||||
|
import org.eclipse.jgit.util.FileUtils; |
||||||
|
|
||||||
|
/** |
||||||
|
* A mutable stack of reftables on local filesystem storage. Not thread-safe. |
||||||
|
* This is an AutoCloseable because this object owns the file handles to the |
||||||
|
* open reftables. |
||||||
|
*/ |
||||||
|
public class FileReftableStack implements AutoCloseable { |
||||||
|
private static class StackEntry { |
||||||
|
|
||||||
|
String name; |
||||||
|
|
||||||
|
ReftableReader reftableReader; |
||||||
|
} |
||||||
|
|
||||||
|
private MergedReftable mergedReftable; |
||||||
|
|
||||||
|
private List<StackEntry> stack; |
||||||
|
|
||||||
|
private long lastNextUpdateIndex; |
||||||
|
|
||||||
|
private final File stackPath; |
||||||
|
|
||||||
|
private final File reftableDir; |
||||||
|
|
||||||
|
private final Runnable onChange; |
||||||
|
|
||||||
|
private final Supplier<Config> configSupplier; |
||||||
|
|
||||||
|
// Used for stats & testing.
|
||||||
|
static class CompactionStats { |
||||||
|
|
||||||
|
long tables; |
||||||
|
|
||||||
|
long bytes; |
||||||
|
|
||||||
|
int attempted; |
||||||
|
|
||||||
|
int failed; |
||||||
|
|
||||||
|
long refCount; |
||||||
|
|
||||||
|
long logCount; |
||||||
|
|
||||||
|
CompactionStats() { |
||||||
|
tables = 0; |
||||||
|
bytes = 0; |
||||||
|
attempted = 0; |
||||||
|
failed = 0; |
||||||
|
logCount = 0; |
||||||
|
refCount = 0; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
private final CompactionStats stats; |
||||||
|
|
||||||
|
/** |
||||||
|
* Creates a stack corresponding to the list of reftables in the argument |
||||||
|
* |
||||||
|
* @param stackPath |
||||||
|
* the filename for the stack. |
||||||
|
* @param reftableDir |
||||||
|
* the dir holding the tables. |
||||||
|
* @param onChange |
||||||
|
* hook to call if we notice a new write |
||||||
|
* @param configSupplier |
||||||
|
* Config supplier |
||||||
|
* @throws IOException |
||||||
|
* on I/O problems |
||||||
|
*/ |
||||||
|
public FileReftableStack(File stackPath, File reftableDir, |
||||||
|
@Nullable Runnable onChange, Supplier<Config> configSupplier) |
||||||
|
throws IOException { |
||||||
|
this.stackPath = stackPath; |
||||||
|
this.reftableDir = reftableDir; |
||||||
|
this.stack = new ArrayList<>(); |
||||||
|
this.configSupplier = configSupplier; |
||||||
|
this.onChange = onChange; |
||||||
|
|
||||||
|
// skip event notification
|
||||||
|
lastNextUpdateIndex = 0; |
||||||
|
reload(); |
||||||
|
|
||||||
|
stats = new CompactionStats(); |
||||||
|
} |
||||||
|
|
||||||
|
CompactionStats getStats() { |
||||||
|
return stats; |
||||||
|
} |
||||||
|
|
||||||
|
/** Thrown if the update indices in the stack are not monotonic */ |
||||||
|
public static class ReftableNumbersNotIncreasingException |
||||||
|
extends RuntimeException { |
||||||
|
private static final long serialVersionUID = 1L; |
||||||
|
|
||||||
|
String name; |
||||||
|
|
||||||
|
long lastMax; |
||||||
|
|
||||||
|
long min; |
||||||
|
|
||||||
|
ReftableNumbersNotIncreasingException(String name, long lastMax, |
||||||
|
long min) { |
||||||
|
this.name = name; |
||||||
|
this.lastMax = lastMax; |
||||||
|
this.min = min; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Reloads the stack, potentially reusing opened reftableReaders. |
||||||
|
* |
||||||
|
* @param names |
||||||
|
* holds the names of the tables to load. |
||||||
|
* @throws FileNotFoundException |
||||||
|
* load must be retried. |
||||||
|
* @throws IOException |
||||||
|
* on other IO errors. |
||||||
|
*/ |
||||||
|
private void reloadOnce(List<String> names) |
||||||
|
throws IOException, FileNotFoundException { |
||||||
|
Map<String, ReftableReader> current = stack.stream() |
||||||
|
.collect(Collectors.toMap(e -> e.name, e -> e.reftableReader)); |
||||||
|
|
||||||
|
List<ReftableReader> newTables = new ArrayList<>(); |
||||||
|
List<StackEntry> newStack = new ArrayList<>(stack.size() + 1); |
||||||
|
try { |
||||||
|
ReftableReader last = null; |
||||||
|
for (String name : names) { |
||||||
|
StackEntry entry = new StackEntry(); |
||||||
|
entry.name = name; |
||||||
|
|
||||||
|
ReftableReader t = null; |
||||||
|
if (current.containsKey(name)) { |
||||||
|
t = current.remove(name); |
||||||
|
} else { |
||||||
|
File subtable = new File(reftableDir, name); |
||||||
|
FileInputStream is; |
||||||
|
|
||||||
|
is = new FileInputStream(subtable); |
||||||
|
|
||||||
|
t = new ReftableReader(BlockSource.from(is)); |
||||||
|
newTables.add(t); |
||||||
|
} |
||||||
|
|
||||||
|
if (last != null) { |
||||||
|
// TODO: move this to MergedReftable
|
||||||
|
if (last.maxUpdateIndex() >= t.minUpdateIndex()) { |
||||||
|
throw new ReftableNumbersNotIncreasingException(name, |
||||||
|
last.maxUpdateIndex(), t.minUpdateIndex()); |
||||||
|
} |
||||||
|
} |
||||||
|
last = t; |
||||||
|
|
||||||
|
entry.reftableReader = t; |
||||||
|
newStack.add(entry); |
||||||
|
} |
||||||
|
// survived without exceptions: swap in new stack, and close
|
||||||
|
// dangling tables.
|
||||||
|
stack = newStack; |
||||||
|
newTables.clear(); |
||||||
|
|
||||||
|
current.values().forEach(r -> { |
||||||
|
try { |
||||||
|
r.close(); |
||||||
|
} catch (IOException e) { |
||||||
|
throw new AssertionError(e); |
||||||
|
} |
||||||
|
}); |
||||||
|
} finally { |
||||||
|
newTables.forEach(t -> { |
||||||
|
try { |
||||||
|
t.close(); |
||||||
|
} catch (IOException ioe) { |
||||||
|
// reader close should not generate errors.
|
||||||
|
throw new AssertionError(ioe); |
||||||
|
} |
||||||
|
}); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
void reload() throws IOException { |
||||||
|
// Try for 2.5 seconds.
|
||||||
|
long deadline = System.currentTimeMillis() + 2500; |
||||||
|
// A successful reftable transaction is 2 atomic file writes
|
||||||
|
// (open, write, close, rename), which a fast Linux system should be
|
||||||
|
// able to do in about ~200us. So 1 ms should be ample time.
|
||||||
|
long min = 1; |
||||||
|
long max = 1000; |
||||||
|
long delay = 0; |
||||||
|
boolean success = false; |
||||||
|
while (System.currentTimeMillis() < deadline) { |
||||||
|
List<String> names = readTableNames(); |
||||||
|
try { |
||||||
|
reloadOnce(names); |
||||||
|
success = true; |
||||||
|
break; |
||||||
|
} catch (FileNotFoundException e) { |
||||||
|
List<String> changed = readTableNames(); |
||||||
|
if (changed.equals(names)) { |
||||||
|
throw e; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
delay = FileUtils.delay(delay, min, max); |
||||||
|
try { |
||||||
|
Thread.sleep(delay); |
||||||
|
} catch (InterruptedException e) { |
||||||
|
Thread.currentThread().interrupt(); |
||||||
|
throw new RuntimeException(e); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
if (!success) { |
||||||
|
// TODO: should reexamine the 'refs' file to see if it was the same
|
||||||
|
// if it didn't change, then we must have corruption. If it did,
|
||||||
|
// retry.
|
||||||
|
throw new LockFailedException(stackPath); |
||||||
|
} |
||||||
|
|
||||||
|
mergedReftable = new MergedReftable(stack.stream() |
||||||
|
.map(x -> x.reftableReader).collect(Collectors.toList())); |
||||||
|
long curr = nextUpdateIndex(); |
||||||
|
if (lastNextUpdateIndex > 0 && lastNextUpdateIndex != curr |
||||||
|
&& onChange != null) { |
||||||
|
onChange.run(); |
||||||
|
} |
||||||
|
lastNextUpdateIndex = curr; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* @return the merged reftable |
||||||
|
*/ |
||||||
|
public MergedReftable getMergedReftable() { |
||||||
|
return mergedReftable; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Writer is a callable that writes data to a reftable under construction. |
||||||
|
* It should set the min/max update index, and then write refs and/or logs. |
||||||
|
* It should not call finish() on the writer. |
||||||
|
*/ |
||||||
|
public interface Writer { |
||||||
|
/** |
||||||
|
* Write data to reftable |
||||||
|
* |
||||||
|
* @param w |
||||||
|
* writer to use |
||||||
|
* @throws IOException |
||||||
|
*/ |
||||||
|
void call(ReftableWriter w) throws IOException; |
||||||
|
} |
||||||
|
|
||||||
|
private List<String> readTableNames() throws IOException { |
||||||
|
List<String> names = new ArrayList<>(stack.size() + 1); |
||||||
|
|
||||||
|
try (BufferedReader br = new BufferedReader( |
||||||
|
new InputStreamReader(new FileInputStream(stackPath), UTF_8))) { |
||||||
|
String line; |
||||||
|
while ((line = br.readLine()) != null) { |
||||||
|
if (!line.isEmpty()) { |
||||||
|
names.add(line); |
||||||
|
} |
||||||
|
} |
||||||
|
} catch (FileNotFoundException e) { |
||||||
|
// file isn't there: empty repository.
|
||||||
|
} |
||||||
|
return names; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* @return true if the on-disk file corresponds to the in-memory data. |
||||||
|
* @throws IOException |
||||||
|
* on IO problem |
||||||
|
*/ |
||||||
|
boolean isUpToDate() throws IOException { |
||||||
|
// We could use FileSnapshot to avoid reading the file, but the file is
|
||||||
|
// small so it's probably a minor optimization.
|
||||||
|
try { |
||||||
|
List<String> names = readTableNames(); |
||||||
|
if (names.size() != stack.size()) { |
||||||
|
return false; |
||||||
|
} |
||||||
|
for (int i = 0; i < names.size(); i++) { |
||||||
|
if (!names.get(i).equals(stack.get(i).name)) { |
||||||
|
return false; |
||||||
|
} |
||||||
|
} |
||||||
|
} catch (FileNotFoundException e) { |
||||||
|
return stack.isEmpty(); |
||||||
|
} |
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* {@inheritDoc} |
||||||
|
*/ |
||||||
|
@Override |
||||||
|
public void close() { |
||||||
|
for (StackEntry entry : stack) { |
||||||
|
try { |
||||||
|
entry.reftableReader.close(); |
||||||
|
} catch (Exception e) { |
||||||
|
// we are reading; this should never fail.
|
||||||
|
throw new AssertionError(e); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
private long nextUpdateIndex() throws IOException { |
||||||
|
return stack.size() > 0 |
||||||
|
? stack.get(stack.size() - 1).reftableReader.maxUpdateIndex() |
||||||
|
+ 1 |
||||||
|
: 1; |
||||||
|
} |
||||||
|
|
||||||
|
private String filename(long low, long high) { |
||||||
|
return String.format("%012x-%012x", //$NON-NLS-1$
|
||||||
|
Long.valueOf(low), Long.valueOf(high)); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Tries to add a new reftable to the stack. Returns true if it succeeded, |
||||||
|
* or false if there was a lock failure, due to races with other processes. |
||||||
|
* This is package private so FileReftableDatabase can call into here. |
||||||
|
* |
||||||
|
* @param w |
||||||
|
* writer to write data to a reftable under construction |
||||||
|
* @return true if the transaction. |
||||||
|
* @throws IOException |
||||||
|
* on I/O problems |
||||||
|
*/ |
||||||
|
@SuppressWarnings("nls") |
||||||
|
public boolean addReftable(Writer w) throws IOException { |
||||||
|
LockFile lock = new LockFile(stackPath); |
||||||
|
try { |
||||||
|
if (!lock.lockForAppend()) { |
||||||
|
return false; |
||||||
|
} |
||||||
|
if (!isUpToDate()) { |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
String fn = filename(nextUpdateIndex(), nextUpdateIndex()); |
||||||
|
|
||||||
|
File tmpTable = File.createTempFile(fn + "_", ".ref", |
||||||
|
stackPath.getParentFile()); |
||||||
|
|
||||||
|
ReftableWriter.Stats s; |
||||||
|
try (FileOutputStream fos = new FileOutputStream(tmpTable)) { |
||||||
|
ReftableWriter rw = new ReftableWriter(reftableConfig(), fos); |
||||||
|
w.call(rw); |
||||||
|
rw.finish(); |
||||||
|
s = rw.getStats(); |
||||||
|
} |
||||||
|
|
||||||
|
if (s.minUpdateIndex() < nextUpdateIndex()) { |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
// The spec says to name log-only files with .log, which is somewhat
|
||||||
|
// pointless given compaction, but we do so anyway.
|
||||||
|
fn += s.refCount() > 0 ? ".ref" : ".log"; |
||||||
|
File dest = new File(reftableDir, fn); |
||||||
|
|
||||||
|
FileUtils.rename(tmpTable, dest, StandardCopyOption.ATOMIC_MOVE); |
||||||
|
lock.write((fn + "\n").getBytes(UTF_8)); |
||||||
|
if (!lock.commit()) { |
||||||
|
FileUtils.delete(dest); |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
reload(); |
||||||
|
|
||||||
|
autoCompact(); |
||||||
|
} finally { |
||||||
|
lock.unlock(); |
||||||
|
} |
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
private ReftableConfig reftableConfig() { |
||||||
|
return new ReftableConfig(configSupplier.get()); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Write the reftable for the given range into a temp file. |
||||||
|
* |
||||||
|
* @param first |
||||||
|
* index of first stack entry to be written |
||||||
|
* @param last |
||||||
|
* index of last stack entry to be written |
||||||
|
* @return the file holding the replacement table. |
||||||
|
* @throws IOException |
||||||
|
* on I/O problem |
||||||
|
*/ |
||||||
|
private File compactLocked(int first, int last) throws IOException { |
||||||
|
String fn = filename(first, last); |
||||||
|
|
||||||
|
File tmpTable = File.createTempFile(fn + "_", ".ref", //$NON-NLS-1$//$NON-NLS-2$
|
||||||
|
stackPath.getParentFile()); |
||||||
|
try (FileOutputStream fos = new FileOutputStream(tmpTable)) { |
||||||
|
ReftableCompactor c = new ReftableCompactor(fos) |
||||||
|
.setConfig(reftableConfig()) |
||||||
|
.setMinUpdateIndex( |
||||||
|
stack.get(first).reftableReader.minUpdateIndex()) |
||||||
|
.setMaxUpdateIndex( |
||||||
|
stack.get(last).reftableReader.maxUpdateIndex()) |
||||||
|
.setIncludeDeletes(first > 0); |
||||||
|
|
||||||
|
List<ReftableReader> compactMe = new ArrayList<>(); |
||||||
|
long totalBytes = 0; |
||||||
|
for (int i = first; i <= last; i++) { |
||||||
|
compactMe.add(stack.get(i).reftableReader); |
||||||
|
totalBytes += stack.get(i).reftableReader.size(); |
||||||
|
} |
||||||
|
c.addAll(compactMe); |
||||||
|
|
||||||
|
c.compact(); |
||||||
|
|
||||||
|
// Even though the compaction did not definitely succeed, we keep
|
||||||
|
// tally here as we've expended the effort.
|
||||||
|
stats.bytes += totalBytes; |
||||||
|
stats.tables += first - last + 1; |
||||||
|
stats.attempted++; |
||||||
|
stats.refCount += c.getStats().refCount(); |
||||||
|
stats.logCount += c.getStats().logCount(); |
||||||
|
} |
||||||
|
|
||||||
|
return tmpTable; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Compacts a range of the stack, following the file locking protocol |
||||||
|
* documented in the spec. |
||||||
|
* |
||||||
|
* @param first |
||||||
|
* index of first stack entry to be considered in compaction |
||||||
|
* @param last |
||||||
|
* index of last stack entry to be considered in compaction |
||||||
|
* @return true if a compaction was successfully applied. |
||||||
|
* @throws IOException |
||||||
|
* on I/O problem |
||||||
|
*/ |
||||||
|
boolean compactRange(int first, int last) throws IOException { |
||||||
|
if (first >= last) { |
||||||
|
return true; |
||||||
|
} |
||||||
|
LockFile lock = new LockFile(stackPath); |
||||||
|
|
||||||
|
File tmpTable = null; |
||||||
|
List<LockFile> subtableLocks = new ArrayList<>(); |
||||||
|
|
||||||
|
try { |
||||||
|
if (!lock.lock()) { |
||||||
|
return false; |
||||||
|
} |
||||||
|
if (!isUpToDate()) { |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
List<File> deleteOnSuccess = new ArrayList<>(); |
||||||
|
for (int i = first; i <= last; i++) { |
||||||
|
File f = new File(reftableDir, stack.get(i).name); |
||||||
|
LockFile lf = new LockFile(f); |
||||||
|
if (!lf.lock()) { |
||||||
|
return false; |
||||||
|
} |
||||||
|
subtableLocks.add(lf); |
||||||
|
deleteOnSuccess.add(f); |
||||||
|
} |
||||||
|
|
||||||
|
lock.unlock(); |
||||||
|
lock = null; |
||||||
|
|
||||||
|
tmpTable = compactLocked(first, last); |
||||||
|
|
||||||
|
lock = new LockFile(stackPath); |
||||||
|
if (!lock.lock()) { |
||||||
|
return false; |
||||||
|
} |
||||||
|
if (!isUpToDate()) { |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
String fn = filename( |
||||||
|
stack.get(first).reftableReader.minUpdateIndex(), |
||||||
|
stack.get(last).reftableReader.maxUpdateIndex()); |
||||||
|
|
||||||
|
// The spec suggests to use .log for log-only tables, and collect
|
||||||
|
// all log entries in a single file at the bottom of the stack. That would
|
||||||
|
// require supporting overlapping ranges for the different tables. For the
|
||||||
|
// sake of simplicity, we simply ignore this and always produce a log +
|
||||||
|
// ref combined table.
|
||||||
|
fn += ".ref"; //$NON-NLS-1$
|
||||||
|
File dest = new File(reftableDir, fn); |
||||||
|
|
||||||
|
FileUtils.rename(tmpTable, dest, StandardCopyOption.ATOMIC_MOVE); |
||||||
|
tmpTable = null; |
||||||
|
|
||||||
|
StringBuilder sb = new StringBuilder(); |
||||||
|
|
||||||
|
for (int i = 0; i < first; i++) { |
||||||
|
sb.append(stack.get(i).name + "\n"); //$NON-NLS-1$
|
||||||
|
} |
||||||
|
sb.append(fn + "\n"); //$NON-NLS-1$
|
||||||
|
for (int i = last + 1; i < stack.size(); i++) { |
||||||
|
sb.append(stack.get(i).name + "\n"); //$NON-NLS-1$
|
||||||
|
} |
||||||
|
|
||||||
|
lock.write(sb.toString().getBytes(UTF_8)); |
||||||
|
if (!lock.commit()) { |
||||||
|
dest.delete(); |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
for (File f : deleteOnSuccess) { |
||||||
|
Files.delete(f.toPath()); |
||||||
|
} |
||||||
|
|
||||||
|
reload(); |
||||||
|
return true; |
||||||
|
} finally { |
||||||
|
if (tmpTable != null) { |
||||||
|
tmpTable.delete(); |
||||||
|
} |
||||||
|
for (LockFile lf : subtableLocks) { |
||||||
|
lf.unlock(); |
||||||
|
} |
||||||
|
if (lock != null) { |
||||||
|
lock.unlock(); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Calculate an approximate log2. |
||||||
|
* |
||||||
|
* @param sz |
||||||
|
* @return log2 |
||||||
|
*/ |
||||||
|
static int log(long sz) { |
||||||
|
long base = 2; |
||||||
|
if (sz <= 0) { |
||||||
|
throw new IllegalArgumentException("log2 negative"); //$NON-NLS-1$
|
||||||
|
} |
||||||
|
int l = 0; |
||||||
|
while (sz > 0) { |
||||||
|
l++; |
||||||
|
sz /= base; |
||||||
|
} |
||||||
|
|
||||||
|
return l - 1; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* A segment is a consecutive list of reftables of the same approximate |
||||||
|
* size. |
||||||
|
*/ |
||||||
|
static class Segment { |
||||||
|
// the approximate log_2 of the size.
|
||||||
|
int log; |
||||||
|
|
||||||
|
// The total bytes in this segment
|
||||||
|
long bytes; |
||||||
|
|
||||||
|
int start; |
||||||
|
|
||||||
|
int end; // exclusive.
|
||||||
|
|
||||||
|
int size() { |
||||||
|
return end - start; |
||||||
|
} |
||||||
|
|
||||||
|
Segment(int start, int end, int log, long bytes) { |
||||||
|
this.log = log; |
||||||
|
this.start = start; |
||||||
|
this.end = end; |
||||||
|
this.bytes = bytes; |
||||||
|
} |
||||||
|
|
||||||
|
Segment() { |
||||||
|
this(0, 0, 0, 0); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public int hashCode() { |
||||||
|
return 0; // appease error-prone
|
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public boolean equals(Object other) { |
||||||
|
Segment o = (Segment) other; |
||||||
|
return o.bytes == bytes && o.log == log && o.start == start |
||||||
|
&& o.end == end; |
||||||
|
} |
||||||
|
|
||||||
|
@SuppressWarnings("boxing") |
||||||
|
@Override |
||||||
|
public String toString() { |
||||||
|
return String.format("{ [%d,%d) l=%d sz=%d }", start, end, log, //$NON-NLS-1$
|
||||||
|
bytes); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
static List<Segment> segmentSizes(long sizes[]) { |
||||||
|
List<Segment> segments = new ArrayList<>(); |
||||||
|
Segment cur = new Segment(); |
||||||
|
for (int i = 0; i < sizes.length; i++) { |
||||||
|
int l = log(sizes[i]); |
||||||
|
if (l != cur.log && cur.bytes > 0) { |
||||||
|
segments.add(cur); |
||||||
|
cur = new Segment(); |
||||||
|
cur.start = i; |
||||||
|
cur.log = l; |
||||||
|
} |
||||||
|
|
||||||
|
cur.log = l; |
||||||
|
cur.end = i + 1; |
||||||
|
cur.bytes += sizes[i]; |
||||||
|
} |
||||||
|
segments.add(cur); |
||||||
|
return segments; |
||||||
|
} |
||||||
|
|
||||||
|
private static Optional<Segment> autoCompactCandidate(long sizes[]) { |
||||||
|
if (sizes.length == 0) { |
||||||
|
return Optional.empty(); |
||||||
|
} |
||||||
|
|
||||||
|
// The cost of compaction is proportional to the size, and we want to
|
||||||
|
// avoid frequent large compactions. We do this by playing the game 2048
|
||||||
|
// here: first compact together the smallest tables if there are more
|
||||||
|
// than one. Then try to see if the result will be big enough to match
|
||||||
|
// up with next up.
|
||||||
|
|
||||||
|
List<Segment> segments = segmentSizes(sizes); |
||||||
|
segments = segments.stream().filter(s -> s.size() > 1) |
||||||
|
.collect(Collectors.toList()); |
||||||
|
if (segments.isEmpty()) { |
||||||
|
return Optional.empty(); |
||||||
|
} |
||||||
|
|
||||||
|
Optional<Segment> optMinSeg = segments.stream() |
||||||
|
.min(Comparator.comparing(s -> Integer.valueOf(s.log))); |
||||||
|
// Input is non-empty, so always present.
|
||||||
|
Segment smallCollected = optMinSeg.get(); |
||||||
|
while (smallCollected.start > 0) { |
||||||
|
int prev = smallCollected.start - 1; |
||||||
|
long prevSize = sizes[prev]; |
||||||
|
if (log(smallCollected.bytes) < log(prevSize)) { |
||||||
|
break; |
||||||
|
} |
||||||
|
smallCollected.start = prev; |
||||||
|
smallCollected.bytes += prevSize; |
||||||
|
} |
||||||
|
|
||||||
|
return Optional.of(smallCollected); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Heuristically tries to compact the stack if the stack has a suitable |
||||||
|
* shape. |
||||||
|
* |
||||||
|
* @throws IOException |
||||||
|
*/ |
||||||
|
private void autoCompact() throws IOException { |
||||||
|
Optional<Segment> cand = autoCompactCandidate(tableSizes()); |
||||||
|
if (cand.isPresent()) { |
||||||
|
if (!compactRange(cand.get().start, cand.get().end - 1)) { |
||||||
|
stats.failed++; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// 68b footer, 24b header = 92.
|
||||||
|
private static long OVERHEAD = 91; |
||||||
|
|
||||||
|
private long[] tableSizes() throws IOException { |
||||||
|
long[] sizes = new long[stack.size()]; |
||||||
|
for (int i = 0; i < stack.size(); i++) { |
||||||
|
// If we don't subtract the overhead, the file size isn't
|
||||||
|
// proportional to the number of entries. This will cause us to
|
||||||
|
// compact too often, which is expensive.
|
||||||
|
sizes[i] = stack.get(i).reftableReader.size() - OVERHEAD; |
||||||
|
} |
||||||
|
return sizes; |
||||||
|
} |
||||||
|
|
||||||
|
void compactFully() throws IOException { |
||||||
|
if (!compactRange(0, stack.size() - 1)) { |
||||||
|
stats.failed++; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
Loading…
Reference in new issue