This introduces ReftableBatchRefUpdate and ReftableDatabase as generic classes, with some code moved to DfsReftableBatchRefUpdate and DfsReftableDatabase.

Clarify thread-safety requirements by asserting locked status in accessors, and acquiring locks in callers. This does not fix threading problems, because ReftableBatchRefUpdate already wraps the whole transaction in a lock.

This also fixes a number of bugs in ReftableBatchRefUpdate:

* non-atomic updates should not bail on the first failure
* isNameConflicting should also check for conflicts between names that are added and removed in the BatchRefUpdate

Change-Id: I5ec91173ea9a0aa19da444c8c0b2e0f4e8f88798
Signed-off-by: Han-Wen Nienhuys <hanwen@google.com>
Signed-off-by: Matthias Sohn <matthias.sohn@sap.com>
Han-Wen Nienhuys
5 years ago
5 changed files with 721 additions and 372 deletions
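The thread-safety convention described in the commit message (accessors assert that the lock is held, while public entry points acquire it) can be sketched in isolation. The class below is a made-up illustration, not JGit code; only the ReentrantLock pattern mirrors what ReftableDatabase does.

import java.util.concurrent.locks.ReentrantLock;

// Hypothetical illustration of the locking convention; not part of this change.
class LockedLazyReader {
	private final ReentrantLock lock = new ReentrantLock(true);
	private String cached;

	// Accessor: only valid while a caller holds the lock.
	private String reader() {
		assert lock.isLocked();
		if (cached == null) {
			cached = "loaded"; // stand-in for openMergedReftable()
		}
		return cached;
	}

	// Caller: acquires the lock around every use of the accessor.
	public String get() {
		lock.lock();
		try {
			return reader();
		} finally {
			lock.unlock();
		}
	}
}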
@@ -0,0 +1,196 @@
/*
 * Copyright (C) 2019, Google Inc.
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.eclipse.jgit.internal.storage.dfs;

import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource;
import org.eclipse.jgit.internal.storage.io.BlockSource;
import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.internal.storage.reftable.ReftableBatchRefUpdate;
import org.eclipse.jgit.internal.storage.reftable.ReftableCompactor;
import org.eclipse.jgit.internal.storage.reftable.ReftableConfig;
import org.eclipse.jgit.internal.storage.reftable.ReftableReader;
import org.eclipse.jgit.internal.storage.reftable.ReftableWriter;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.transport.ReceiveCommand;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;

import static org.eclipse.jgit.internal.storage.pack.PackExt.REFTABLE;

/**
 * {@link org.eclipse.jgit.lib.BatchRefUpdate} for
 * {@link org.eclipse.jgit.internal.storage.dfs.DfsReftableDatabase}.
 */
public class DfsReftableBatchRefUpdate extends ReftableBatchRefUpdate {
	private static final int AVG_BYTES = 36;

	private final DfsReftableDatabase refdb;

	private final DfsObjDatabase odb;

	/**
	 * Initialize batch update.
	 *
	 * @param refdb
	 *            database the update will modify.
	 * @param odb
	 *            object database to store the reftable.
	 */
	protected DfsReftableBatchRefUpdate(DfsReftableDatabase refdb,
			DfsObjDatabase odb) {
		super(refdb, refdb.reftableDatabase, refdb.getLock(), refdb.getRepository());
		this.refdb = refdb;
		this.odb = odb;
	}

	@Override
	protected void applyUpdates(List<Ref> newRefs, List<ReceiveCommand> pending)
			throws IOException {
		Set<DfsPackDescription> prune = Collections.emptySet();
		DfsPackDescription pack = odb.newPack(PackSource.INSERT);
		try (DfsOutputStream out = odb.writeFile(pack, REFTABLE)) {
			ReftableConfig cfg = DfsPackCompactor
					.configureReftable(refdb.getReftableConfig(), out);

			ReftableWriter.Stats stats;
			if (refdb.compactDuringCommit()
					&& newRefs.size() * AVG_BYTES <= cfg.getRefBlockSize()
					&& canCompactTopOfStack(cfg)) {
				ByteArrayOutputStream tmp = new ByteArrayOutputStream();
				ReftableWriter rw = new ReftableWriter(cfg, tmp);
				write(rw, newRefs, pending);
				rw.finish();
				stats = compactTopOfStack(out, cfg, tmp.toByteArray());
				prune = toPruneTopOfStack();
			} else {
				ReftableWriter rw = new ReftableWriter(cfg, out);
				write(rw, newRefs, pending);
				rw.finish();
				stats = rw.getStats();
			}
			pack.addFileExt(REFTABLE);
			pack.setReftableStats(stats);
		}

		odb.commitPack(Collections.singleton(pack), prune);
		odb.addReftable(pack, prune);
		refdb.clearCache();
	}
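	// Illustrative note, not part of the original file: applyUpdates serializes the pending
	// commands into a new reftable. When the update is small (newRefs.size() * AVG_BYTES fits
	// in one ref block) and the top of the stack is itself a small, reftable-only INSERT pack,
	// the new table is merged with that top entry via compactTopOfStack and the replaced pack
	// is pruned; otherwise the new table is simply appended to the stack.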

	private boolean canCompactTopOfStack(ReftableConfig cfg)
			throws IOException {
		refdb.getLock().lock();
		try {
			DfsReftableStack stack = refdb.stack();
			List<ReftableReader> readers = stack.readers();
			if (readers.isEmpty()) {
				return false;
			}

			int lastIdx = readers.size() - 1;
			DfsReftable last = stack.files().get(lastIdx);
			DfsPackDescription desc = last.getPackDescription();
			if (desc.getPackSource() != PackSource.INSERT
					|| !packOnlyContainsReftable(desc)) {
				return false;
			}

			ReftableReader table = readers.get(lastIdx);
			int bs = cfg.getRefBlockSize();
			return table.size() <= 3 * bs;
		} finally {
			refdb.getLock().unlock();
		}
	}

	private ReftableWriter.Stats compactTopOfStack(OutputStream out,
			ReftableConfig cfg, byte[] newTable) throws IOException {
		refdb.getLock().lock();
		try {
			List<ReftableReader> stack = refdb.stack().readers();

			ReftableReader last = stack.get(stack.size() - 1);

			List<ReftableReader> tables = new ArrayList<>(2);
			tables.add(last);
			tables.add(new ReftableReader(BlockSource.from(newTable)));

			ReftableCompactor compactor = new ReftableCompactor(out);
			compactor.setConfig(cfg);
			compactor.setIncludeDeletes(true);
			compactor.addAll(tables);
			compactor.compact();
			return compactor.getStats();
		} finally {
			refdb.getLock().unlock();
		}
	}

	private Set<DfsPackDescription> toPruneTopOfStack() throws IOException {
		refdb.getLock().lock();
		try {
			List<DfsReftable> stack = refdb.stack().files();

			DfsReftable last = stack.get(stack.size() - 1);
			return Collections.singleton(last.getPackDescription());
		} finally {
			refdb.getLock().unlock();
		}
	}

	private boolean packOnlyContainsReftable(DfsPackDescription desc) {
		for (PackExt ext : PackExt.values()) {
			if (ext != REFTABLE && desc.hasFileExt(ext)) {
				return false;
			}
		}
		return true;
	}
}
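As a rough usage sketch (not part of this change), the batch update above is normally reached through the public RefDatabase API rather than constructed directly. The snippet assumes a reftable-backed DFS repository, for example InMemoryRepository in recent JGit versions, and an ObjectId that already exists in it; the class and method names BatchUpdateExample and run are made up.

import java.io.IOException;

import org.eclipse.jgit.lib.BatchRefUpdate;
import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.transport.ReceiveCommand;

// Hypothetical driver for the reftable batch-update path; not part of this change.
class BatchUpdateExample {
	static void run(Repository repo, ObjectId commitId) throws IOException {
		BatchRefUpdate batch = repo.getRefDatabase().newBatchUpdate();
		// Non-atomic: per the fix above, one rejected command must not abort the rest.
		batch.setAtomic(false);
		batch.addCommand(new ReceiveCommand(ObjectId.zeroId(), commitId, "refs/heads/a"));
		batch.addCommand(new ReceiveCommand(ObjectId.zeroId(), commitId, "refs/heads/b"));
		try (RevWalk rw = new RevWalk(repo)) {
			batch.execute(rw, NullProgressMonitor.INSTANCE);
		}
		for (ReceiveCommand cmd : batch.getCommands()) {
			System.out.println(cmd.getRefName() + ": " + cmd.getResult());
		}
	}
}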
@@ -0,0 +1,295 @@
package org.eclipse.jgit.internal.storage.reftable;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.locks.ReentrantLock;

import org.eclipse.jgit.annotations.Nullable;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.RefDatabase;
import org.eclipse.jgit.lib.ReflogReader;
import org.eclipse.jgit.transport.ReceiveCommand;

/**
 * Operations on {@link MergedReftable} that are common to various
 * reftable-using subclasses of {@link RefDatabase}. See
 * {@link org.eclipse.jgit.internal.storage.dfs.DfsReftableDatabase} for an
 * example.
 */
public abstract class ReftableDatabase {
	// Protects mergedTables.
	private final ReentrantLock lock = new ReentrantLock(true);

	private Reftable mergedTables;

	/**
	 * ReftableDatabase lazily initializes its merged reftable on the first
	 * read after construction or clearCache() call. This function should
	 * always instantiate a new MergedReftable based on the list of reftables
	 * specified by the underlying storage.
	 *
	 * @return the merged reftable for this instance
	 * @throws IOException
	 *             on I/O problems.
	 */
	abstract protected MergedReftable openMergedReftable() throws IOException;

	/**
	 * @return the next available logical timestamp for an additional reftable
	 *         in the stack.
	 * @throws java.io.IOException
	 *             on I/O problems.
	 */
	public long nextUpdateIndex() throws IOException {
		lock.lock();
		try {
			return reader().maxUpdateIndex() + 1;
		} finally {
			lock.unlock();
		}
	}

	/**
	 * @return a ReflogReader for the given ref
	 * @param refname
	 *            the name of the ref.
	 * @throws IOException
	 *             on I/O problems
	 */
	public ReflogReader getReflogReader(String refname) throws IOException {
		lock.lock();
		try {
			return new ReftableReflogReader(lock, reader(), refname);
		} finally {
			lock.unlock();
		}
	}

	/**
	 * @return a ReceiveCommand for the change from oldRef to newRef
	 * @param oldRef
	 *            a ref
	 * @param newRef
	 *            a ref
	 */
	public static ReceiveCommand toCommand(Ref oldRef, Ref newRef) {
		ObjectId oldId = toId(oldRef);
		ObjectId newId = toId(newRef);
		String name = oldRef != null ? oldRef.getName() : newRef.getName();

		if (oldRef != null && oldRef.isSymbolic()) {
			if (newRef != null) {
				if (newRef.isSymbolic()) {
					return ReceiveCommand.link(oldRef.getTarget().getName(),
							newRef.getTarget().getName(), name);
				} else {
					// This should pass in oldId for compat with
					// RefDirectoryUpdate
					return ReceiveCommand.unlink(oldRef.getTarget().getName(),
							newId, name);
				}
			} else {
				return ReceiveCommand.unlink(oldRef.getTarget().getName(),
						ObjectId.zeroId(), name);
			}
		}

		if (newRef != null && newRef.isSymbolic()) {
			if (oldRef != null) {
				if (oldRef.isSymbolic()) {
					return ReceiveCommand.link(oldRef.getTarget().getName(),
							newRef.getTarget().getName(), name);
				} else {
					return ReceiveCommand.link(oldId,
							newRef.getTarget().getName(), name);
				}
			} else {
				return ReceiveCommand.link(ObjectId.zeroId(),
						newRef.getTarget().getName(), name);
			}
		}

		return new ReceiveCommand(oldId, newId, name);
	}
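	// Illustrative note, not part of the original file: toCommand maps an old/new Ref pair
	// onto the ReceiveCommand vocabulary. Two object-id refs become a plain
	// ReceiveCommand(oldId, newId, name); replacing a symbolic ref with an object-id ref
	// uses ReceiveCommand.unlink(); creating or retargeting a symbolic ref uses
	// ReceiveCommand.link(); a null oldRef or newRef is treated as absent and represented
	// by ObjectId.zeroId().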

	private static ObjectId toId(Ref ref) {
		if (ref != null) {
			ObjectId id = ref.getObjectId();
			if (id != null) {
				return id;
			}
		}
		return ObjectId.zeroId();
	}

	/**
	 * @return the lock protecting underlying ReftableReaders against
	 *         concurrent reads.
	 */
	public ReentrantLock getLock() {
		return lock;
	}

	/**
	 * @return the merged reftable that is implemented by the stack of
	 *         reftables. Return value must be accessed under lock.
	 * @throws IOException
	 *             on I/O problems
	 */
	private Reftable reader() throws IOException {
		assert lock.isLocked();
		if (mergedTables == null) {
			mergedTables = openMergedReftable();
		}
		return mergedTables;
	}

	/**
	 * @return whether the given refName would be illegal in a repository that
	 *         uses loose refs.
	 * @param refName
	 *            the name to check
	 * @param added
	 *            a sorted set of refs we pretend have been added to the
	 *            database.
	 * @param deleted
	 *            a set of refs we pretend have been removed from the database.
	 * @throws IOException
	 *             on I/O problems
	 */
	public boolean isNameConflicting(String refName, TreeSet<String> added,
			Set<String> deleted) throws IOException {
		lock.lock();
		try {
			Reftable table = reader();

			// Cannot be nested within an existing reference.
			int lastSlash = refName.lastIndexOf('/');
			while (0 < lastSlash) {
				String prefix = refName.substring(0, lastSlash);
				if (!deleted.contains(prefix)
						&& (table.hasRef(prefix) || added.contains(prefix))) {
					return true;
				}
				lastSlash = refName.lastIndexOf('/', lastSlash - 1);
			}

			// Cannot be the container of an existing reference.
			String prefix = refName + '/';
			RefCursor c = table.seekRefsWithPrefix(prefix);
			while (c.next()) {
				if (!deleted.contains(c.getRef().getName())) {
					return true;
				}
			}

			String it = added.ceiling(refName + '/');
			if (it != null && it.startsWith(prefix)) {
				return true;
			}
			return false;
		} finally {
			lock.unlock();
		}
	}
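	// Illustrative note, not part of the original file: isNameConflicting enforces the usual
	// loose-ref file/directory rule while honouring refs touched in the same batch. For
	// refName = "refs/heads/a/b", an existing or pending "refs/heads/a" conflicts (it would
	// be both a file and a directory) unless it is in the deleted set; an existing
	// "refs/heads/a/b/c" conflicts unless it is being deleted; and a pending addition of
	// "refs/heads/a/b/c", found via added.ceiling(), conflicts as well.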

	/**
	 * Read a single reference.
	 * <p>
	 * This method expects an unshortened reference name and does not search
	 * using the standard search path.
	 *
	 * @param name
	 *            the unabbreviated name of the reference.
	 * @return the reference (if it exists); else {@code null}.
	 * @throws java.io.IOException
	 *             the reference space cannot be accessed.
	 */
	@Nullable
	public Ref exactRef(String name) throws IOException {
		lock.lock();
		try {
			Reftable table = reader();
			Ref ref = table.exactRef(name);
			if (ref != null && ref.isSymbolic()) {
				return table.resolve(ref);
			}
			return ref;
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Returns refs whose names start with a given prefix.
	 *
	 * @param prefix
	 *            string that names of refs should start with; may be empty (to
	 *            return all refs).
	 * @return immutable list of refs whose names start with {@code prefix}.
	 * @throws java.io.IOException
	 *             the reference space cannot be accessed.
	 */
	public List<Ref> getRefsByPrefix(String prefix) throws IOException {
		List<Ref> all = new ArrayList<>();
		lock.lock();
		try {
			Reftable table = reader();
			try (RefCursor rc = RefDatabase.ALL.equals(prefix) ? table.allRefs()
					: table.seekRefsWithPrefix(prefix)) {
				while (rc.next()) {
					Ref ref = table.resolve(rc.getRef());
					if (ref != null && ref.getObjectId() != null) {
						all.add(ref);
					}
				}
			}
		} finally {
			lock.unlock();
		}

		return Collections.unmodifiableList(all);
	}

	/**
	 * Returns all refs that resolve directly to the given {@link ObjectId}.
	 * Includes peeled {@link ObjectId}s.
	 *
	 * @param id
	 *            {@link ObjectId} to resolve
	 * @return a {@link Set} of {@link Ref}s whose tips point to the provided
	 *         id.
	 * @throws java.io.IOException
	 *             on I/O errors.
	 */
	public Set<Ref> getTipsWithSha1(ObjectId id) throws IOException {
		lock.lock();
		try {
			RefCursor cursor = reader().byObjectId(id);
			Set<Ref> refs = new HashSet<>();
			while (cursor.next()) {
				refs.add(cursor.getRef());
			}
			return refs;
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Drops all data that might be cached in memory.
	 */
	public void clearCache() {
		lock.lock();
		try {
			mergedTables = null;
		} finally {
			lock.unlock();
		}
	}
}
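To show how the abstract contract above might be filled in, here is a minimal, hypothetical subclass that keeps serialized reftables in memory. It is a sketch only: InMemoryReftableDatabase and addTable are invented names, and the exact MergedReftable constructor signature has varied across JGit versions.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.eclipse.jgit.internal.storage.io.BlockSource;
import org.eclipse.jgit.internal.storage.reftable.MergedReftable;
import org.eclipse.jgit.internal.storage.reftable.Reftable;
import org.eclipse.jgit.internal.storage.reftable.ReftableDatabase;
import org.eclipse.jgit.internal.storage.reftable.ReftableReader;

// Hypothetical in-memory stack of serialized reftables; not JGit code.
class InMemoryReftableDatabase extends ReftableDatabase {
	private final List<byte[]> stack = new ArrayList<>();

	// Append a serialized reftable and invalidate the cached merged view.
	void addTable(byte[] table) {
		getLock().lock();
		try {
			stack.add(table);
			clearCache();
		} finally {
			getLock().unlock();
		}
	}

	@Override
	protected MergedReftable openMergedReftable() throws IOException {
		// Open one reader per table; MergedReftable is assumed to take the list of
		// open tables (the constructor's element type differs across JGit versions).
		List<Reftable> readers = new ArrayList<>();
		for (byte[] table : stack) {
			readers.add(new ReftableReader(BlockSource.from(table)));
		}
		return new MergedReftable(readers);
	}
}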