From 1b783d037091266b035e1727db6b6ce7a397ef63 Mon Sep 17 00:00:00 2001 From: "Shawn O. Pearce" Date: Tue, 27 Jul 2010 19:32:13 -0700 Subject: [PATCH 1/5] Allow PackWriter callers to manage the thread pool By permitting the caller of PackWriter to select the Executor it uses for task execution, we give the caller the ability to manage the lifecycle of the thread pool, including reusing it across concurrent pack generators. This is the first step to supporting application thread pools within Daemon or another managed service like Gerrit Code Review. Change-Id: I96bee7b9c30ff9885f2bd261d0b6daaac713b5a4 Signed-off-by: Shawn O. Pearce --- .../eclipse/jgit/storage/pack/DeltaTask.java | 88 +++++++++++++ .../eclipse/jgit/storage/pack/PackWriter.java | 121 ++++++++++++++---- 2 files changed, 185 insertions(+), 24 deletions(-) create mode 100644 org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java new file mode 100644 index 000000000..11bb3efa7 --- /dev/null +++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2010, Google Inc. + * and other copyright owners as documented in the project's IP log. + * + * This program and the accompanying materials are made available + * under the terms of the Eclipse Distribution License v1.0 which + * accompanies this distribution, is reproduced below, and is + * available at http://www.eclipse.org/org/documents/edl-v10.php + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * - Neither the name of the Eclipse Foundation, Inc. nor the + * names of its contributors may be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package org.eclipse.jgit.storage.pack; + +import java.util.concurrent.Callable; + +import org.eclipse.jgit.lib.ObjectReader; +import org.eclipse.jgit.lib.ProgressMonitor; + +final class DeltaTask implements Callable { + private final PackWriter writer; + + private final ObjectReader templateReader; + + private final DeltaCache dc; + + private final ProgressMonitor pm; + + private final int batchSize; + + private final int start; + + private final ObjectToPack[] list; + + DeltaTask(PackWriter writer, ObjectReader reader, DeltaCache dc, + ProgressMonitor pm, int batchSize, int start, ObjectToPack[] list) { + this.writer = writer; + this.templateReader = reader; + this.dc = dc; + this.pm = pm; + this.batchSize = batchSize; + this.start = start; + this.list = list; + } + + public Object call() throws Exception { + final ObjectReader or = templateReader.newReader(); + try { + DeltaWindow dw; + dw = new DeltaWindow(writer, dc, or); + dw.search(pm, list, start, batchSize); + } finally { + or.release(); + } + return null; + } +} diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java index a9cd45b1d..b2a6697d1 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java @@ -58,8 +58,12 @@ import java.util.Collections; import java.util.Comparator; import java.util.Iterator; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.zip.Deflater; import java.util.zip.DeflaterOutputStream; @@ -218,6 +222,8 @@ public class PackWriter { private int threads = 1; + private Executor executor; + private boolean thin; private 
boolean ignoreMissingUninteresting = true; @@ -603,6 +609,10 @@ public class PackWriter { * During delta compression, if there are enough objects to be considered * the writer will start up concurrent threads and allow them to compress * different sections of the repository concurrently. + *

+ * An application thread pool can be set by {@link #setExecutor(Executor)}. + * If not set a temporary pool will be created by the writer, and torn down + * automatically when compression is over. * * @param threads * number of threads to use. If <= 0 the number of available @@ -612,6 +622,22 @@ public class PackWriter { this.threads = threads; } + /** + * Set the executor to use when using threads. + *

+ * During delta compression if the executor is non-null jobs will be queued + * up on it to perform delta compression in parallel. Aside from setting the + * executor, the caller must set {@link #setThread(int)} to enable threaded + * delta search. + * + * @param executor + * executor to use for threads. Set to null to create a temporary + * executor just for this writer. + */ + public void setExecutor(Executor executor) { + this.executor = executor; + } + /** @return true if this writer is producing a thin pack. */ public boolean isThin() { return thin; @@ -980,11 +1006,8 @@ public class PackWriter { return; } - final List errors = Collections - .synchronizedList(new ArrayList()); final DeltaCache dc = new ThreadSafeDeltaCache(this); final ProgressMonitor pm = new ThreadSafeProgressMonitor(monitor); - final ExecutorService pool = Executors.newFixedThreadPool(threads); // Guess at the size of batch we want. Because we don't really // have a way for a thread to steal work from another thread if @@ -995,6 +1018,7 @@ public class PackWriter { if (estSize < 2 * getDeltaSearchWindowSize()) estSize = 2 * getDeltaSearchWindowSize(); + final List myTasks = new ArrayList(threads * 2); for (int i = 0; i < cnt;) { final int start = i; final int batchSize; @@ -1019,39 +1043,66 @@ public class PackWriter { batchSize = end - start; } i += batchSize; + myTasks.add(new DeltaTask(this, reader, dc, pm, batchSize, start, list)); + } - pool.submit(new Runnable() { - public void run() { + final List errors = Collections + .synchronizedList(new ArrayList()); + if (executor instanceof ExecutorService) { + // Caller supplied us a service, use it directly. + // + runTasks((ExecutorService) executor, myTasks, errors); + + } else if (executor == null) { + // Caller didn't give us a way to run the tasks, spawn up a + // temporary thread pool and make sure it tears down cleanly. 
+ // + ExecutorService pool = Executors.newFixedThreadPool(threads); + try { + runTasks(pool, myTasks, errors); + } finally { + pool.shutdown(); + for (;;) { try { - final ObjectReader or = reader.newReader(); + if (pool.awaitTermination(60, TimeUnit.SECONDS)) + break; + } catch (InterruptedException e) { + throw new IOException( + JGitText.get().packingCancelledDuringObjectsWriting); + } + } + } + } else { + // The caller gave us an executor, but it might not do + // asynchronous execution. Wrap everything and hope it + // can schedule these for us. + // + final CountDownLatch done = new CountDownLatch(myTasks.size()); + for (final DeltaTask task : myTasks) { + executor.execute(new Runnable() { + public void run() { try { - DeltaWindow dw; - dw = new DeltaWindow(PackWriter.this, dc, or); - dw.search(pm, list, start, batchSize); + task.call(); + } catch (Throwable failure) { + errors.add(failure); } finally { - or.release(); + done.countDown(); } - } catch (Throwable err) { - errors.add(err); } - } - }); - } - - // Tell the pool to stop. - // - pool.shutdown(); - for (;;) { + }); + } try { - if (pool.awaitTermination(60, TimeUnit.SECONDS)) - break; - } catch (InterruptedException e) { + done.await(); + } catch (InterruptedException ie) { + // We can't abort the other tasks as we have no handle. + // Cross our fingers and just break out anyway. + // throw new IOException( JGitText.get().packingCancelledDuringObjectsWriting); } } - // If any thread threw an error, try to report it back as + // If any task threw an error, try to report it back as // though we weren't using a threaded search algorithm. 
// if (!errors.isEmpty()) { @@ -1069,6 +1120,28 @@ public class PackWriter { } } + private void runTasks(ExecutorService pool, List tasks, + List errors) throws IOException { + List> futures = new ArrayList>(tasks.size()); + for (DeltaTask task : tasks) + futures.add(pool.submit(task)); + + try { + for (Future f : futures) { + try { + f.get(); + } catch (ExecutionException failed) { + errors.add(failed.getCause()); + } + } + } catch (InterruptedException ie) { + for (Future f : futures) + f.cancel(true); + throw new IOException( + JGitText.get().packingCancelledDuringObjectsWriting); + } + } + private void writeObjects(ProgressMonitor writeMonitor, PackOutputStream out) throws IOException { for (List list : objectsLists) { From 1a06179ea707ab088b6543df77be5cf0ea44c497 Mon Sep 17 00:00:00 2001 From: "Shawn O. Pearce" Date: Wed, 28 Jul 2010 10:45:27 -0700 Subject: [PATCH 2/5] Move PackWriter configuration to PackConfig This refactoring permits applications to configure global per-process settings for all packing and easily pass it through to per-request PackWriters, ensuring that the process configuration overrides the repository specific settings. For example this might help in a daemon environment where the server wants to cap the resources used to serve a dynamic upload pack request, even though the repository's own pack.* settings might be configured to be more aggressive. This allows fast but less bandwidth efficient serving of clients, while still retaining good compression through a cron managed `git gc`. Change-Id: I58cc5e01b48924b1a99f79aa96c8150cdfc50846 Signed-off-by: Shawn O. 
Pearce --- .../jgit/storage/file/PackWriterTest.java | 46 +- .../src/org/eclipse/jgit/lib/Config.java | 15 + .../eclipse/jgit/storage/pack/DeltaCache.java | 6 +- .../eclipse/jgit/storage/pack/DeltaTask.java | 8 +- .../jgit/storage/pack/DeltaWindow.java | 16 +- .../eclipse/jgit/storage/pack/PackConfig.java | 588 +++++++++++++++++- .../eclipse/jgit/storage/pack/PackWriter.java | 485 ++------------- .../storage/pack/ThreadSafeDeltaCache.java | 4 +- 8 files changed, 668 insertions(+), 500 deletions(-) diff --git a/org.eclipse.jgit.test/tst/org/eclipse/jgit/storage/file/PackWriterTest.java b/org.eclipse.jgit.test/tst/org/eclipse/jgit/storage/file/PackWriterTest.java index 9e663d7b4..5685ccac6 100644 --- a/org.eclipse.jgit.test/tst/org/eclipse/jgit/storage/file/PackWriterTest.java +++ b/org.eclipse.jgit.test/tst/org/eclipse/jgit/storage/file/PackWriterTest.java @@ -66,6 +66,7 @@ import org.eclipse.jgit.lib.TextProgressMonitor; import org.eclipse.jgit.revwalk.RevObject; import org.eclipse.jgit.revwalk.RevWalk; import org.eclipse.jgit.storage.file.PackIndex.MutableEntry; +import org.eclipse.jgit.storage.pack.PackConfig; import org.eclipse.jgit.storage.pack.PackWriter; import org.eclipse.jgit.transport.IndexPack; import org.eclipse.jgit.util.JGitTestUtil; @@ -78,6 +79,8 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { private static final List EMPTY_LIST_REVS = Collections . emptyList(); + private PackConfig config; + private PackWriter writer; private ByteArrayOutputStream os; @@ -96,16 +99,23 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { packBase = new File(trash, "tmp_pack"); packFile = new File(trash, "tmp_pack.pack"); indexFile = new File(trash, "tmp_pack.idx"); - writer = new PackWriter(db); + config = new PackConfig(db); + } + + public void tearDown() throws Exception { + if (writer != null) + writer.release(); + super.tearDown(); } /** * Test constructor for exceptions, default settings, initialization. 
*/ public void testContructor() { + writer = new PackWriter(config, db.newObjectReader()); assertEquals(false, writer.isDeltaBaseAsOffset()); - assertEquals(true, writer.isReuseDeltas()); - assertEquals(true, writer.isReuseObjects()); + assertEquals(true, config.isReuseDeltas()); + assertEquals(true, config.isReuseObjects()); assertEquals(0, writer.getObjectsNumber()); } @@ -113,13 +123,17 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { * Change default settings and verify them. */ public void testModifySettings() { + config.setReuseDeltas(false); + config.setReuseObjects(false); + config.setDeltaBaseAsOffset(false); + assertEquals(false, config.isReuseDeltas()); + assertEquals(false, config.isReuseObjects()); + assertEquals(false, config.isDeltaBaseAsOffset()); + + writer = new PackWriter(config, db.newObjectReader()); writer.setDeltaBaseAsOffset(true); - writer.setReuseDeltas(false); - writer.setReuseObjects(false); - assertEquals(true, writer.isDeltaBaseAsOffset()); - assertEquals(false, writer.isReuseDeltas()); - assertEquals(false, writer.isReuseObjects()); + assertEquals(false, config.isDeltaBaseAsOffset()); } /** @@ -188,7 +202,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { * @throws IOException */ public void testWritePack1() throws IOException { - writer.setReuseDeltas(false); + config.setReuseDeltas(false); writeVerifyPack1(); } @@ -199,8 +213,8 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { * @throws IOException */ public void testWritePack1NoObjectReuse() throws IOException { - writer.setReuseDeltas(false); - writer.setReuseObjects(false); + config.setReuseDeltas(false); + config.setReuseObjects(false); writeVerifyPack1(); } @@ -231,7 +245,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { * @throws IOException */ public void testWritePack2DeltasReuseOffsets() throws IOException { - writer.setDeltaBaseAsOffset(true); + config.setDeltaBaseAsOffset(true); 
writeVerifyPack2(true); } @@ -265,7 +279,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { * */ public void testWritePack3() throws MissingObjectException, IOException { - writer.setReuseDeltas(false); + config.setReuseDeltas(false); final ObjectId forcedOrder[] = new ObjectId[] { ObjectId.fromString("82c6b885ff600be425b4ea96dee75dca255b69e7"), ObjectId.fromString("c59759f143fb1fe21c197981df75a7ee00290799"), @@ -363,7 +377,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { } public void testWriteIndex() throws Exception { - writer.setIndexVersion(2); + config.setIndexVersion(2); writeVerifyPack4(false); // Validate that IndexPack came up with the right CRC32 value. @@ -419,7 +433,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { } private void writeVerifyPack2(boolean deltaReuse) throws IOException { - writer.setReuseDeltas(deltaReuse); + config.setReuseDeltas(deltaReuse); final LinkedList interestings = new LinkedList(); interestings.add(ObjectId .fromString("82c6b885ff600be425b4ea96dee75dca255b69e7")); @@ -482,6 +496,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { final boolean ignoreMissingUninteresting) throws MissingObjectException, IOException { NullProgressMonitor m = NullProgressMonitor.INSTANCE; + writer = new PackWriter(config, db.newObjectReader()); writer.setThin(thin); writer.setIgnoreMissingUninteresting(ignoreMissingUninteresting); writer.preparePack(m, interestings, uninterestings); @@ -493,6 +508,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase { private void createVerifyOpenPack(final Iterator objectSource) throws MissingObjectException, IOException { NullProgressMonitor m = NullProgressMonitor.INSTANCE; + writer = new PackWriter(config, db.newObjectReader()); writer.preparePack(objectSource); writer.writePack(m, m, os); writer.release(); diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/lib/Config.java 
b/org.eclipse.jgit/src/org/eclipse/jgit/lib/Config.java index ccb251691..2e1ab9a07 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/lib/Config.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/lib/Config.java @@ -216,6 +216,21 @@ public class Config { , section, name)); } + /** + * Obtain an integer value from the configuration. + * + * @param section + * section the key is grouped within. + * @param name + * name of the key to get. + * @param defaultValue + * default value to return if no value was present. + * @return an integer value from the configuration, or defaultValue. + */ + public long getLong(String section, String name, long defaultValue) { + return getLong(section, null, name, defaultValue); + } + /** * Obtain an integer value from the configuration. * diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaCache.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaCache.java index b6a7436f1..93eab19e2 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaCache.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaCache.java @@ -55,9 +55,9 @@ class DeltaCache { private long used; - DeltaCache(PackWriter pw) { - size = pw.getDeltaCacheSize(); - entryLimit = pw.getDeltaCacheLimit(); + DeltaCache(PackConfig pc) { + size = pc.getDeltaCacheSize(); + entryLimit = pc.getDeltaCacheLimit(); queue = new ReferenceQueue(); } diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java index 11bb3efa7..5e551e9d4 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java @@ -49,7 +49,7 @@ import org.eclipse.jgit.lib.ObjectReader; import org.eclipse.jgit.lib.ProgressMonitor; final class DeltaTask implements Callable { - private final PackWriter writer; + private final PackConfig config; private final ObjectReader templateReader; @@ -63,9 
+63,9 @@ final class DeltaTask implements Callable { private final ObjectToPack[] list; - DeltaTask(PackWriter writer, ObjectReader reader, DeltaCache dc, + DeltaTask(PackConfig config, ObjectReader reader, DeltaCache dc, ProgressMonitor pm, int batchSize, int start, ObjectToPack[] list) { - this.writer = writer; + this.config = config; this.templateReader = reader; this.dc = dc; this.pm = pm; @@ -78,7 +78,7 @@ final class DeltaTask implements Callable { final ObjectReader or = templateReader.newReader(); try { DeltaWindow dw; - dw = new DeltaWindow(writer, dc, or); + dw = new DeltaWindow(config, dc, or); dw.search(pm, list, start, batchSize); } finally { or.release(); diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaWindow.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaWindow.java index 6521e6d3e..c96105650 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaWindow.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaWindow.java @@ -60,7 +60,7 @@ class DeltaWindow { private static final int NEXT_SRC = 1; - private final PackWriter writer; + private final PackConfig config; private final DeltaCache deltaCache; @@ -101,8 +101,8 @@ class DeltaWindow { /** Used to compress cached deltas. */ private Deflater deflater; - DeltaWindow(PackWriter pw, DeltaCache dc, ObjectReader or) { - writer = pw; + DeltaWindow(PackConfig pc, DeltaCache dc, ObjectReader or) { + config = pc; deltaCache = dc; reader = or; @@ -117,12 +117,12 @@ class DeltaWindow { // PackWriter has a minimum of 2 for the window size, but then // users might complain that JGit is creating a bigger pack file. 
// - window = new DeltaWindowEntry[pw.getDeltaSearchWindowSize() + 1]; + window = new DeltaWindowEntry[config.getDeltaSearchWindowSize() + 1]; for (int i = 0; i < window.length; i++) window[i] = new DeltaWindowEntry(); - maxMemory = pw.getDeltaSearchMemoryLimit(); - maxDepth = pw.getMaxDeltaDepth(); + maxMemory = config.getDeltaSearchMemoryLimit(); + maxDepth = config.getMaxDeltaDepth(); } void search(ProgressMonitor monitor, ObjectToPack[] toSearch, int off, @@ -442,7 +442,7 @@ class DeltaWindow { IncorrectObjectTypeException, IOException, LargeObjectException { byte[] buf = ent.buffer; if (buf == null) { - buf = writer.buffer(reader, ent.object); + buf = PackWriter.buffer(config, reader, ent.object); if (0 < maxMemory) loaded += buf.length; ent.buffer = buf; @@ -452,7 +452,7 @@ class DeltaWindow { private Deflater deflater() { if (deflater == null) - deflater = new Deflater(writer.getCompressionLevel()); + deflater = new Deflater(config.getCompressionLevel()); else deflater.reset(); return deflater; diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java index 814ab8f29..355ed8a3f 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java @@ -1,5 +1,6 @@ /* - * Copyright (C) 2010, Google Inc. + * Copyright (C) 2008-2010, Google Inc. + * Copyright (C) 2008, Marek Zawirski * and other copyright owners as documented in the project's IP log. 
* * This program and the accompanying materials are made available @@ -43,51 +44,572 @@ package org.eclipse.jgit.storage.pack; -import static java.util.zip.Deflater.DEFAULT_COMPRESSION; +import java.util.concurrent.Executor; +import java.util.zip.Deflater; + import org.eclipse.jgit.lib.Config; -import org.eclipse.jgit.lib.Config.SectionParser; +import org.eclipse.jgit.lib.Repository; +import org.eclipse.jgit.storage.file.PackIndexWriter; + +/** + * Configuration used by a {@link PackWriter} when constructing the stream. + * + * A configuration may be modified once created, but should not be modified + * while it is being used by a PackWriter. If a configuration is not modified it + * is safe to share the same configuration instance between multiple concurrent + * threads executing different PackWriters. + */ +public class PackConfig { + /** + * Default value of deltas reuse option: {@value} + * + * @see #setReuseDeltas(boolean) + */ + public static final boolean DEFAULT_REUSE_DELTAS = true; + + /** + * Default value of objects reuse option: {@value} + * + * @see #setReuseObjects(boolean) + */ + public static final boolean DEFAULT_REUSE_OBJECTS = true; + + /** + * Default value of delta compress option: {@value} + * + * @see #setDeltaCompress(boolean) + */ + public static final boolean DEFAULT_DELTA_COMPRESS = true; + + /** + * Default value of delta base as offset option: {@value} + * + * @see #setDeltaBaseAsOffset(boolean) + */ + public static final boolean DEFAULT_DELTA_BASE_AS_OFFSET = false; + + /** + * Default value of maximum delta chain depth: {@value} + * + * @see #setMaxDeltaDepth(int) + */ + public static final int DEFAULT_MAX_DELTA_DEPTH = 50; + + /** + * Default window size during packing: {@value} + * + * @see #setDeltaSearchWindowSize(int) + */ + public static final int DEFAULT_DELTA_SEARCH_WINDOW_SIZE = 10; + + /** + * Default big file threshold: {@value} + * + * @see #setBigFileThreshold(long) + */ + public static final long 
DEFAULT_BIG_FILE_THRESHOLD = 50 * 1024 * 1024; + + /** + * Default delta cache size: {@value} + * + * @see #setDeltaCacheSize(long) + */ + public static final long DEFAULT_DELTA_CACHE_SIZE = 50 * 1024 * 1024; + + /** + * Default delta cache limit: {@value} + * + * @see #setDeltaCacheLimit(int) + */ + public static final int DEFAULT_DELTA_CACHE_LIMIT = 100; + + /** + * Default index version: {@value} + * + * @see #setIndexVersion(int) + */ + public static final int DEFAULT_INDEX_VERSION = 2; + + + private int compressionLevel = Deflater.DEFAULT_COMPRESSION; + + private boolean reuseDeltas = DEFAULT_REUSE_DELTAS; + + private boolean reuseObjects = DEFAULT_REUSE_OBJECTS; + + private boolean deltaBaseAsOffset = DEFAULT_DELTA_BASE_AS_OFFSET; + + private boolean deltaCompress = DEFAULT_DELTA_COMPRESS; + + private int maxDeltaDepth = DEFAULT_MAX_DELTA_DEPTH; -class PackConfig { - /** Key for {@link Config#get(SectionParser)}. */ - static final Config.SectionParser KEY = new SectionParser() { - public PackConfig parse(final Config cfg) { - return new PackConfig(cfg); - } - }; + private int deltaSearchWindowSize = DEFAULT_DELTA_SEARCH_WINDOW_SIZE; - final int deltaWindow; + private long deltaSearchMemoryLimit; - final long deltaWindowMemory; + private long deltaCacheSize = DEFAULT_DELTA_CACHE_SIZE; - final int deltaDepth; + private int deltaCacheLimit = DEFAULT_DELTA_CACHE_LIMIT; - final long deltaCacheSize; + private long bigFileThreshold = DEFAULT_BIG_FILE_THRESHOLD; - final int deltaCacheLimit; + private int threads; - final int compression; + private Executor executor; - final int indexVersion; + private int indexVersion = DEFAULT_INDEX_VERSION; - final long bigFileThreshold; - final int threads; + /** Create a default configuration. */ + public PackConfig() { + // Fields are initialized to defaults. + } + + /** + * Create a configuration honoring the repository's settings. + * + * @param db + * the repository to read settings from. 
The repository is not + * retained by the new configuration, instead its settings are + * copied during the constructor. + */ + public PackConfig(Repository db) { + fromConfig(db.getConfig()); + } + + /** + * Create a configuration honoring settings in a {@link Config}. + * + * @param cfg + * the source to read settings from. The source is not retained + * by the new configuration, instead its settings are copied + * during the constructor. + */ + public PackConfig(Config cfg) { + fromConfig(cfg); + } + + /** + * Check whether to reuse deltas existing in repository. + * + * Default setting: {@value #DEFAULT_REUSE_DELTAS} + * + * @return true if object is configured to reuse deltas; false otherwise. + */ + public boolean isReuseDeltas() { + return reuseDeltas; + } + + /** + * Set reuse deltas configuration option for the writer. + * + * When enabled, writer will search for delta representation of object in + * repository and use it if possible. Normally, only deltas with base to + * another object existing in set of objects to pack will be used. The + * exception however is thin-packs where the base object may exist on the + * other side. + * + * When raw delta data is directly copied from a pack file, its checksum is + * computed to verify the data is not corrupt. + * + * Default setting: {@value #DEFAULT_REUSE_DELTAS} + * + * @param reuseDeltas + * boolean indicating whether or not try to reuse deltas. + */ + public void setReuseDeltas(boolean reuseDeltas) { + this.reuseDeltas = reuseDeltas; + } + + /** + * Checks whether to reuse existing objects representation in repository. + * + * Default setting: {@value #DEFAULT_REUSE_OBJECTS} + * + * @return true if writer is configured to reuse objects representation from + * pack; false otherwise. + */ + public boolean isReuseObjects() { + return reuseObjects; + } + + /** + * Set reuse objects configuration option for the writer. + * + * If enabled, writer searches for compressed representation in a pack file. 
+ * If possible, compressed data is directly copied from such a pack file. + * Data checksum is verified. + * + * Default setting: {@value #DEFAULT_REUSE_OBJECTS} + * + * @param reuseObjects + * boolean indicating whether or not writer should reuse existing + * objects representation. + */ + public void setReuseObjects(boolean reuseObjects) { + this.reuseObjects = reuseObjects; + } + + /** + * True if writer can use offsets to point to a delta base. + * + * If true the writer may choose to use an offset to point to a delta base + * in the same pack, this is a newer style of reference that saves space. + * False if the writer has to use the older (and more compatible style) of + * storing the full ObjectId of the delta base. + * + * Default setting: {@value #DEFAULT_DELTA_BASE_AS_OFFSET} + * + * @return true if delta base is stored as an offset; false if it is stored + * as an ObjectId. + */ + public boolean isDeltaBaseAsOffset() { + return deltaBaseAsOffset; + } + + /** + * Set writer delta base format. + * + * Delta base can be written as an offset in a pack file (new approach + * reducing file size) or as an object id (legacy approach, compatible with + * old readers). + * + * Default setting: {@value #DEFAULT_DELTA_BASE_AS_OFFSET} + * + * @param deltaBaseAsOffset + * boolean indicating whether delta base can be stored as an + * offset. + */ + public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) { + this.deltaBaseAsOffset = deltaBaseAsOffset; + } + + /** + * Check whether the writer will create new deltas on the fly. + * + * Default setting: {@value #DEFAULT_DELTA_COMPRESS} + * + * @return true if the writer will create a new delta when either + * {@link #isReuseDeltas()} is false, or no suitable delta is + * available for reuse. + */ + public boolean isDeltaCompress() { + return deltaCompress; + } + + /** + * Set whether or not the writer will create new deltas on the fly. 
+ * + * Default setting: {@value #DEFAULT_DELTA_COMPRESS} + * + * @param deltaCompress + * true to create deltas when {@link #isReuseDeltas()} is false, + * or when a suitable delta isn't available for reuse. Set to + * false to write whole objects instead. + */ + public void setDeltaCompress(boolean deltaCompress) { + this.deltaCompress = deltaCompress; + } + + /** + * Get maximum depth of delta chain set up for the writer. + * + * Generated chains are not longer than this value. + * + * Default setting: {@value #DEFAULT_MAX_DELTA_DEPTH} + * + * @return maximum delta chain depth. + */ + public int getMaxDeltaDepth() { + return maxDeltaDepth; + } + + /** + * Set up maximum depth of delta chain for the writer. + * + * Generated chains are not longer than this value. Too low value causes low + * compression level, while too big makes unpacking (reading) longer. + * + * Default setting: {@value #DEFAULT_MAX_DELTA_DEPTH} + * + * @param maxDeltaDepth + * maximum delta chain depth. + */ + public void setMaxDeltaDepth(int maxDeltaDepth) { + this.maxDeltaDepth = maxDeltaDepth; + } + + /** + * Get the number of objects to try when looking for a delta base. + * + * This limit is per thread, if 4 threads are used the actual memory used + * will be 4 times this value. + * + * Default setting: {@value #DEFAULT_DELTA_SEARCH_WINDOW_SIZE} + * + * @return the object count to be searched. + */ + public int getDeltaSearchWindowSize() { + return deltaSearchWindowSize; + } + + /** + * Set the number of objects considered when searching for a delta base. + * + * Default setting: {@value #DEFAULT_DELTA_SEARCH_WINDOW_SIZE} + * + * @param objectCount + * number of objects to search at once. Must be at least 2. + */ + public void setDeltaSearchWindowSize(int objectCount) { + if (objectCount <= 2) + setDeltaCompress(false); + else + deltaSearchWindowSize = objectCount; + } + + /** + * Get maximum number of bytes to put into the delta search window. 
+ * + * Default setting is 0, for an unlimited amount of memory usage. Actual + * memory used is the lower limit of either this setting, or the sum of + * space used by at most {@link #getDeltaSearchWindowSize()} objects. + * + * This limit is per thread, if 4 threads are used the actual memory limit + * will be 4 times this value. + * + * @return the memory limit. + */ + public long getDeltaSearchMemoryLimit() { + return deltaSearchMemoryLimit; + } + + /** + * Set the maximum number of bytes to put into the delta search window. + * + * Default setting is 0, for an unlimited amount of memory usage. If the + * memory limit is reached before {@link #getDeltaSearchWindowSize()} the + * window size is temporarily lowered. + * + * @param memoryLimit + * Maximum number of bytes to load at once, 0 for unlimited. + */ + public void setDeltaSearchMemoryLimit(long memoryLimit) { + deltaSearchMemoryLimit = memoryLimit; + } + + /** + * Get the size of the in-memory delta cache. + * + * This limit is for the entire writer, even if multiple threads are used. + * + * Default setting: {@value #DEFAULT_DELTA_CACHE_SIZE} + * + * @return maximum number of bytes worth of delta data to cache in memory. + * If 0 the cache is infinite in size (up to the JVM heap limit + * anyway). A very tiny size such as 1 indicates the cache is + * effectively disabled. + */ + public long getDeltaCacheSize() { + return deltaCacheSize; + } + + /** + * Set the maximum number of bytes of delta data to cache. + * + * During delta search, up to this many bytes worth of small or hard to + * compute deltas will be stored in memory. This cache speeds up writing by + * allowing the cached entry to simply be dumped to the output stream. + * + * Default setting: {@value #DEFAULT_DELTA_CACHE_SIZE} + * + * @param size + * number of bytes to cache. Set to 0 to enable an infinite + * cache, set to 1 (an impossible size for any delta) to disable + * the cache. 
+ */ + public void setDeltaCacheSize(long size) { + deltaCacheSize = size; + } + + /** + * Maximum size in bytes of a delta to cache. + * + * Default setting: {@value #DEFAULT_DELTA_CACHE_LIMIT} + * + * @return maximum size (in bytes) of a delta that should be cached. + */ + public int getDeltaCacheLimit() { + return deltaCacheLimit; + } + + /** + * Set the maximum size of a delta that should be cached. + * + * During delta search, any delta smaller than this size will be cached, up + * to the {@link #getDeltaCacheSize()} maximum limit. This speeds up writing + * by allowing these cached deltas to be output as-is. + * + * Default setting: {@value #DEFAULT_DELTA_CACHE_LIMIT} + * + * @param size + * maximum size (in bytes) of a delta to be cached. + */ + public void setDeltaCacheLimit(int size) { + deltaCacheLimit = size; + } + + /** + * Get the maximum file size that will be delta compressed. + * + * Files bigger than this setting will not be delta compressed, as they are + * more than likely already highly compressed binary data files that do not + * delta compress well, such as MPEG videos. + * + * Default setting: {@value #DEFAULT_BIG_FILE_THRESHOLD} + * + * @return the configured big file threshold. + */ + public long getBigFileThreshold() { + return bigFileThreshold; + } + + /** + * Set the maximum file size that should be considered for deltas. + * + * Default setting: {@value #DEFAULT_BIG_FILE_THRESHOLD} + * + * @param bigFileThreshold + * the limit, in bytes. + */ + public void setBigFileThreshold(long bigFileThreshold) { + this.bigFileThreshold = bigFileThreshold; + } + + /** + * Get the compression level applied to objects in the pack. + * + * Default setting: {@value java.util.zip.Deflater#DEFAULT_COMPRESSION} + * + * @return current compression level, see {@link java.util.zip.Deflater}. + */ + public int getCompressionLevel() { + return compressionLevel; + } + + /** + * Set the compression level applied to objects in the pack. 
+ * + * Default setting: {@value java.util.zip.Deflater#DEFAULT_COMPRESSION} + * + * @param level + * compression level, must be a valid level recognized by the + * {@link java.util.zip.Deflater} class. + */ + public void setCompressionLevel(int level) { + compressionLevel = level; + } + + /** + * Get the number of threads used during delta compression. + * + * Default setting: 0 (auto-detect processors) + * + * @return number of threads used for delta compression. 0 will auto-detect + * the threads to the number of available processors. + */ + public int getThreads() { + return threads; + } + + /** + * Set the number of threads to use for delta compression. + * + * During delta compression, if there are enough objects to be considered + * the writer will start up concurrent threads and allow them to compress + * different sections of the repository concurrently. + * + * An application thread pool can be set by {@link #setExecutor(Executor)}. + * If not set a temporary pool will be created by the writer, and torn down + * automatically when compression is over. + * + * Default setting: 0 (auto-detect processors) + * + * @param threads + * number of threads to use. If <= 0 the number of available + * processors for this JVM is used. + */ + public void setThreads(int threads) { + this.threads = threads; + } + + /** @return the preferred thread pool to execute delta search on. */ + public Executor getExecutor() { + return executor; + } + + /** + * Set the executor to use when using threads. + * + * During delta compression if the executor is non-null jobs will be queued + * up on it to perform delta compression in parallel. Aside from setting the + * executor, the caller must set {@link #setThreads(int)} to enable threaded + * delta search. + * + * @param executor + * executor to use for threads. Set to null to create a temporary + * executor just for the writer. 
+ */ + public void setExecutor(Executor executor) { + this.executor = executor; + } + + /** + * Get the pack index file format version this instance creates. + * + * Default setting: {@value #DEFAULT_INDEX_VERSION} + * + * @return the index version, the special version 0 designates the oldest + * (most compatible) format available for the objects. + * @see PackIndexWriter + */ + public int getIndexVersion() { + return indexVersion; + } - private PackConfig(Config rc) { - deltaWindow = rc.getInt("pack", "window", PackWriter.DEFAULT_DELTA_SEARCH_WINDOW_SIZE); - deltaWindowMemory = rc.getLong("pack", null, "windowmemory", 0); - deltaCacheSize = rc.getLong("pack", null, "deltacachesize", PackWriter.DEFAULT_DELTA_CACHE_SIZE); - deltaCacheLimit = rc.getInt("pack", "deltacachelimit", PackWriter.DEFAULT_DELTA_CACHE_LIMIT); - deltaDepth = rc.getInt("pack", "depth", PackWriter.DEFAULT_MAX_DELTA_DEPTH); - compression = compression(rc); - indexVersion = rc.getInt("pack", "indexversion", 2); - bigFileThreshold = rc.getLong("core", null, "bigfilethreshold", PackWriter.DEFAULT_BIG_FILE_THRESHOLD); - threads = rc.getInt("pack", "threads", 0); + /** + * Set the pack index file format version this instance will create. + * + * Default setting: {@value #DEFAULT_INDEX_VERSION} + * + * @param version + * the version to write. The special version 0 designates the + * oldest (most compatible) format available for the objects. + * @see PackIndexWriter + */ + public void setIndexVersion(final int version) { + indexVersion = version; } - private static int compression(Config rc) { - if (rc.getString("pack", null, "compression") != null) - return rc.getInt("pack", "compression", DEFAULT_COMPRESSION); - return rc.getInt("core", "compression", DEFAULT_COMPRESSION); + /** + * Update properties by setting fields from the configuration. + * + * If a property's corresponding variable is not defined in the supplied + * configuration, then it is left unmodified. 
+ * + * @param rc + * configuration to read properties from. + */ + public void fromConfig(final Config rc) { + setMaxDeltaDepth(rc.getInt("pack", "depth", getMaxDeltaDepth())); + setDeltaSearchWindowSize(rc.getInt("pack", "window", getDeltaSearchWindowSize())); + setDeltaSearchMemoryLimit(rc.getLong("pack", "windowmemory", getDeltaSearchMemoryLimit())); + setDeltaCacheSize(rc.getLong("pack", "deltacachesize", getDeltaCacheSize())); + setDeltaCacheLimit(rc.getInt("pack", "deltacachelimit", getDeltaCacheLimit())); + setCompressionLevel(rc.getInt("pack", "compression", + rc.getInt("core", "compression", getCompressionLevel()))); + setIndexVersion(rc.getInt("pack", "indexversion", getIndexVersion())); + setBigFileThreshold(rc.getLong("core", "bigfilethreshold", getBigFileThreshold())); + setThreads(rc.getInt("pack", "threads", getThreads())); } } diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java index b2a6697d1..df5594cf4 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java @@ -75,7 +75,6 @@ import org.eclipse.jgit.errors.LargeObjectException; import org.eclipse.jgit.errors.MissingObjectException; import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException; import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.lib.Config; import org.eclipse.jgit.lib.Constants; import org.eclipse.jgit.lib.NullProgressMonitor; import org.eclipse.jgit.lib.ObjectId; @@ -127,47 +126,6 @@ import org.eclipse.jgit.util.TemporaryBuffer; *

*/ public class PackWriter { - /** - * Default value of deltas reuse option. - * - * @see #setReuseDeltas(boolean) - */ - public static final boolean DEFAULT_REUSE_DELTAS = true; - - /** - * Default value of objects reuse option. - * - * @see #setReuseObjects(boolean) - */ - public static final boolean DEFAULT_REUSE_OBJECTS = true; - - /** - * Default value of delta base as offset option. - * - * @see #setDeltaBaseAsOffset(boolean) - */ - public static final boolean DEFAULT_DELTA_BASE_AS_OFFSET = false; - - /** - * Default value of maximum delta chain depth. - * - * @see #setMaxDeltaDepth(int) - */ - public static final int DEFAULT_MAX_DELTA_DEPTH = 50; - - /** - * Default window size during packing. - * - * @see #setDeltaSearchWindowSize(int) - */ - public static final int DEFAULT_DELTA_SEARCH_WINDOW_SIZE = 10; - - static final long DEFAULT_BIG_FILE_THRESHOLD = 50 * 1024 * 1024; - - static final long DEFAULT_DELTA_CACHE_SIZE = 50 * 1024 * 1024; - - static final int DEFAULT_DELTA_CACHE_LIMIT = 100; - private static final int PACK_VERSION_GENERATED = 2; @SuppressWarnings("unchecked") @@ -185,8 +143,6 @@ public class PackWriter { // edge objects for thin packs private final ObjectIdSubclassMap edgeObjects = new ObjectIdSubclassMap(); - private int compressionLevel; - private Deflater myDeflater; private final ObjectReader reader; @@ -194,35 +150,15 @@ public class PackWriter { /** {@link #reader} recast to the reuse interface, if it supports it. 
*/ private final ObjectReuseAsIs reuseSupport; + private final PackConfig config; + private List sortedByName; private byte packcsum[]; - private boolean reuseDeltas = DEFAULT_REUSE_DELTAS; - - private boolean reuseObjects = DEFAULT_REUSE_OBJECTS; + private boolean deltaBaseAsOffset; - private boolean deltaBaseAsOffset = DEFAULT_DELTA_BASE_AS_OFFSET; - - private boolean deltaCompress = true; - - private int maxDeltaDepth = DEFAULT_MAX_DELTA_DEPTH; - - private int deltaSearchWindowSize = DEFAULT_DELTA_SEARCH_WINDOW_SIZE; - - private long deltaSearchMemoryLimit; - - private long deltaCacheSize = DEFAULT_DELTA_CACHE_SIZE; - - private int deltaCacheLimit = DEFAULT_DELTA_CACHE_LIMIT; - - private int indexVersion; - - private long bigFileThreshold = DEFAULT_BIG_FILE_THRESHOLD; - - private int threads = 1; - - private Executor executor; + private boolean reuseDeltas; private boolean thin; @@ -251,7 +187,7 @@ public class PackWriter { * reader to read from the repository with. */ public PackWriter(final ObjectReader reader) { - this(null, reader); + this(new PackConfig(), reader); } /** @@ -266,105 +202,38 @@ public class PackWriter { * reader to read from the repository with. 
*/ public PackWriter(final Repository repo, final ObjectReader reader) { - this.reader = reader; - if (reader instanceof ObjectReuseAsIs) - reuseSupport = ((ObjectReuseAsIs) reader); - else - reuseSupport = null; - - final PackConfig pc = configOf(repo).get(PackConfig.KEY); - deltaSearchWindowSize = pc.deltaWindow; - deltaSearchMemoryLimit = pc.deltaWindowMemory; - deltaCacheSize = pc.deltaCacheSize; - deltaCacheLimit = pc.deltaCacheLimit; - maxDeltaDepth = pc.deltaDepth; - compressionLevel = pc.compression; - indexVersion = pc.indexVersion; - bigFileThreshold = pc.bigFileThreshold; - threads = pc.threads; - } - - private static Config configOf(final Repository repo) { - if (repo == null) - return new Config(); - return repo.getConfig(); - } - - /** - * Check whether object is configured to reuse deltas existing in - * repository. - *

- * Default setting: {@link #DEFAULT_REUSE_DELTAS} - *

- * - * @return true if object is configured to reuse deltas; false otherwise. - */ - public boolean isReuseDeltas() { - return reuseDeltas; + this(new PackConfig(repo), reader); } /** - * Set reuse deltas configuration option for this writer. When enabled, - * writer will search for delta representation of object in repository and - * use it if possible. Normally, only deltas with base to another object - * existing in set of objects to pack will be used. Exception is however - * thin-pack (see - * {@link #preparePack(ProgressMonitor, Collection, Collection)} and - * {@link #preparePack(Iterator)}) where base object must exist on other - * side machine. + * Create writer with a specified configuration. *

- * When raw delta data is directly copied from a pack file, checksum is - * computed to verify data. - *

- *

- * Default setting: {@link #DEFAULT_REUSE_DELTAS} - *

- * - * @param reuseDeltas - * boolean indicating whether or not try to reuse deltas. - */ - public void setReuseDeltas(boolean reuseDeltas) { - this.reuseDeltas = reuseDeltas; - } - - /** - * Checks whether object is configured to reuse existing objects - * representation in repository. - *

- * Default setting: {@link #DEFAULT_REUSE_OBJECTS} - *

+ * Objects for packing are specified in {@link #preparePack(Iterator)} or + * {@link #preparePack(ProgressMonitor, Collection, Collection)}. * - * @return true if writer is configured to reuse objects representation from - * pack; false otherwise. + * @param config + * configuration for the pack writer. + * @param reader + * reader to read from the repository with. */ - public boolean isReuseObjects() { - return reuseObjects; - } + public PackWriter(final PackConfig config, final ObjectReader reader) { + this.config = config; + this.reader = reader; + if (reader instanceof ObjectReuseAsIs) + reuseSupport = ((ObjectReuseAsIs) reader); + else + reuseSupport = null; - /** - * Set reuse objects configuration option for this writer. If enabled, - * writer searches for representation in a pack file. If possible, - * compressed data is directly copied from such a pack file. Data checksum - * is verified. - *

- * Default setting: {@link #DEFAULT_REUSE_OBJECTS} - *

- * - * @param reuseObjects - * boolean indicating whether or not writer should reuse existing - * objects representation. - */ - public void setReuseObjects(boolean reuseObjects) { - this.reuseObjects = reuseObjects; + deltaBaseAsOffset = config.isDeltaBaseAsOffset(); + reuseDeltas = config.isReuseDeltas(); } /** * Check whether writer can store delta base as an offset (new style * reducing pack size) or should store it as an object id (legacy style, * compatible with old readers). - *

- * Default setting: {@link #DEFAULT_DELTA_BASE_AS_OFFSET} - *

+ * + * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET} * * @return true if delta base is stored as an offset; false if it is stored * as an object id. @@ -377,9 +246,8 @@ public class PackWriter { * Set writer delta base format. Delta base can be written as an offset in a * pack file (new approach reducing file size) or as an object id (legacy * approach, compatible with old readers). - *

- * Default setting: {@link #DEFAULT_DELTA_BASE_AS_OFFSET} - *

+ * + * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET} * * @param deltaBaseAsOffset * boolean indicating whether delta base can be stored as an @@ -389,255 +257,6 @@ public class PackWriter { this.deltaBaseAsOffset = deltaBaseAsOffset; } - /** - * Check whether the writer will create new deltas on the fly. - *

- * Default setting: true - *

- * - * @return true if the writer will create a new delta when either - * {@link #isReuseDeltas()} is false, or no suitable delta is - * available for reuse. - */ - public boolean isDeltaCompress() { - return deltaCompress; - } - - /** - * Set whether or not the writer will create new deltas on the fly. - * - * @param deltaCompress - * true to create deltas when {@link #isReuseDeltas()} is false, - * or when a suitable delta isn't available for reuse. Set to - * false to write whole objects instead. - */ - public void setDeltaCompress(boolean deltaCompress) { - this.deltaCompress = deltaCompress; - } - - /** - * Get maximum depth of delta chain set up for this writer. Generated chains - * are not longer than this value. - *

- * Default setting: {@link #DEFAULT_MAX_DELTA_DEPTH} - *

- * - * @return maximum delta chain depth. - */ - public int getMaxDeltaDepth() { - return maxDeltaDepth; - } - - /** - * Set up maximum depth of delta chain for this writer. Generated chains are - * not longer than this value. Too low value causes low compression level, - * while too big makes unpacking (reading) longer. - *

- * Default setting: {@link #DEFAULT_MAX_DELTA_DEPTH} - *

- * - * @param maxDeltaDepth - * maximum delta chain depth. - */ - public void setMaxDeltaDepth(int maxDeltaDepth) { - this.maxDeltaDepth = maxDeltaDepth; - } - - /** - * Get the number of objects to try when looking for a delta base. - *

- * This limit is per thread, if 4 threads are used the actual memory - * used will be 4 times this value. - * - * @return the object count to be searched. - */ - public int getDeltaSearchWindowSize() { - return deltaSearchWindowSize; - } - - /** - * Set the number of objects considered when searching for a delta base. - *

- * Default setting: {@link #DEFAULT_DELTA_SEARCH_WINDOW_SIZE} - *

- * - * @param objectCount - * number of objects to search at once. Must be at least 2. - */ - public void setDeltaSearchWindowSize(int objectCount) { - if (objectCount <= 2) - setDeltaCompress(false); - else - deltaSearchWindowSize = objectCount; - } - - /** - * Get maximum number of bytes to put into the delta search window. - *

- * Default setting is 0, for an unlimited amount of memory usage. Actual - * memory used is the lower limit of either this setting, or the sum of - * space used by at most {@link #getDeltaSearchWindowSize()} objects. - *

- * This limit is per thread, if 4 threads are used the actual memory - * limit will be 4 times this value. - * - * @return the memory limit. - */ - public long getDeltaSearchMemoryLimit() { - return deltaSearchMemoryLimit; - } - - /** - * Set the maximum number of bytes to put into the delta search window. - *

- * Default setting is 0, for an unlimited amount of memory usage. If the - * memory limit is reached before {@link #getDeltaSearchWindowSize()} the - * window size is temporarily lowered. - * - * @param memoryLimit - * Maximum number of bytes to load at once, 0 for unlimited. - */ - public void setDeltaSearchMemoryLimit(long memoryLimit) { - deltaSearchMemoryLimit = memoryLimit; - } - - /** - * Get the size of the in-memory delta cache. - *

- * This limit is for the entire writer, even if multiple threads are used. - * - * @return maximum number of bytes worth of delta data to cache in memory. - * If 0 the cache is infinite in size (up to the JVM heap limit - * anyway). A very tiny size such as 1 indicates the cache is - * effectively disabled. - */ - public long getDeltaCacheSize() { - return deltaCacheSize; - } - - /** - * Set the maximum number of bytes of delta data to cache. - *

- * During delta search, up to this many bytes worth of small or hard to - * compute deltas will be stored in memory. This cache speeds up writing by - * allowing the cached entry to simply be dumped to the output stream. - * - * @param size - * number of bytes to cache. Set to 0 to enable an infinite - * cache, set to 1 (an impossible size for any delta) to disable - * the cache. - */ - public void setDeltaCacheSize(long size) { - deltaCacheSize = size; - } - - /** - * Maximum size in bytes of a delta to cache. - * - * @return maximum size (in bytes) of a delta that should be cached. - */ - public int getDeltaCacheLimit() { - return deltaCacheLimit; - } - - /** - * Set the maximum size of a delta that should be cached. - *

- * During delta search, any delta smaller than this size will be cached, up - * to the {@link #getDeltaCacheSize()} maximum limit. This speeds up writing - * by allowing these cached deltas to be output as-is. - * - * @param size - * maximum size (in bytes) of a delta to be cached. - */ - public void setDeltaCacheLimit(int size) { - deltaCacheLimit = size; - } - - /** - * Get the maximum file size that will be delta compressed. - *

- * Files bigger than this setting will not be delta compressed, as they are - * more than likely already highly compressed binary data files that do not - * delta compress well, such as MPEG videos. - * - * @return the configured big file threshold. - */ - public long getBigFileThreshold() { - return bigFileThreshold; - } - - /** - * Set the maximum file size that should be considered for deltas. - * - * @param bigFileThreshold - * the limit, in bytes. - */ - public void setBigFileThreshold(long bigFileThreshold) { - this.bigFileThreshold = bigFileThreshold; - } - - /** - * Get the compression level applied to objects in the pack. - * - * @return current compression level, see {@link java.util.zip.Deflater}. - */ - public int getCompressionLevel() { - return compressionLevel; - } - - /** - * Set the compression level applied to objects in the pack. - * - * @param level - * compression level, must be a valid level recognized by the - * {@link java.util.zip.Deflater} class. Typically this setting - * is {@link java.util.zip.Deflater#BEST_SPEED}. - */ - public void setCompressionLevel(int level) { - compressionLevel = level; - } - - /** @return number of threads used for delta compression. */ - public int getThreads() { - return threads; - } - - /** - * Set the number of threads to use for delta compression. - *

- * During delta compression, if there are enough objects to be considered - * the writer will start up concurrent threads and allow them to compress - * different sections of the repository concurrently. - *

- * An application thread pool can be set by {@link #setExecutor(Executor)}. - * If not set a temporary pool will be created by the writer, and torn down - * automatically when compression is over. - * - * @param threads - * number of threads to use. If <= 0 the number of available - * processors for this JVM is used. - */ - public void setThread(int threads) { - this.threads = threads; - } - - /** - * Set the executor to use when using threads. - *

- * During delta compression if the executor is non-null jobs will be queued - * up on it to perform delta compression in parallel. Aside from setting the - * executor, the caller must set {@link #setThread(int)} to enable threaded - * delta search. - * - * @param executor - * executor to use for threads. Set to null to create a temporary - * executor just for this writer. - */ - public void setExecutor(Executor executor) { - this.executor = executor; - } - /** @return true if this writer is producing a thin pack. */ public boolean isThin() { return thin; @@ -677,18 +296,6 @@ public class PackWriter { ignoreMissingUninteresting = ignore; } - /** - * Set the pack index file format version this instance will create. - * - * @param version - * the version to write. The special version 0 designates the - * oldest (most compatible) format available for the objects. - * @see PackIndexWriter - */ - public void setIndexVersion(final int version) { - indexVersion = version; - } - /** * Returns objects number in a pack file that was created by this writer. * @@ -817,6 +424,7 @@ public class PackWriter { public void writeIndex(final OutputStream indexStream) throws IOException { final List list = sortByName(); final PackIndexWriter iw; + int indexVersion = config.getIndexVersion(); if (indexVersion <= 0) iw = PackIndexWriter.createOldestPossible(indexStream, list); else @@ -868,9 +476,9 @@ public class PackWriter { if (writeMonitor == null) writeMonitor = NullProgressMonitor.INSTANCE; - if ((reuseDeltas || reuseObjects) && reuseSupport != null) + if ((reuseDeltas || config.isReuseObjects()) && reuseSupport != null) searchForReuse(); - if (deltaCompress) + if (config.isDeltaCompress()) searchForDeltas(compressMonitor); final PackOutputStream out = new PackOutputStream(writeMonitor, @@ -980,7 +588,7 @@ public class PackWriter { // If its too big for us to handle, skip over it. 
// - if (bigFileThreshold <= sz || Integer.MAX_VALUE <= sz) + if (config.getBigFileThreshold() <= sz || Integer.MAX_VALUE <= sz) return false; // If its too tiny for the delta compression to work, skip it. @@ -996,17 +604,18 @@ public class PackWriter { final ObjectToPack[] list, final int cnt) throws MissingObjectException, IncorrectObjectTypeException, LargeObjectException, IOException { + int threads = config.getThreads(); if (threads == 0) threads = Runtime.getRuntime().availableProcessors(); - if (threads <= 1 || cnt <= 2 * getDeltaSearchWindowSize()) { - DeltaCache dc = new DeltaCache(this); - DeltaWindow dw = new DeltaWindow(this, dc, reader); + if (threads <= 1 || cnt <= 2 * config.getDeltaSearchWindowSize()) { + DeltaCache dc = new DeltaCache(config); + DeltaWindow dw = new DeltaWindow(config, dc, reader); dw.search(monitor, list, 0, cnt); return; } - final DeltaCache dc = new ThreadSafeDeltaCache(this); + final DeltaCache dc = new ThreadSafeDeltaCache(config); final ProgressMonitor pm = new ThreadSafeProgressMonitor(monitor); // Guess at the size of batch we want. Because we don't really @@ -1015,8 +624,8 @@ public class PackWriter { // are a bit smaller. 
// int estSize = cnt / (threads * 2); - if (estSize < 2 * getDeltaSearchWindowSize()) - estSize = 2 * getDeltaSearchWindowSize(); + if (estSize < 2 * config.getDeltaSearchWindowSize()) + estSize = 2 * config.getDeltaSearchWindowSize(); final List myTasks = new ArrayList(threads * 2); for (int i = 0; i < cnt;) { @@ -1043,9 +652,10 @@ public class PackWriter { batchSize = end - start; } i += batchSize; - myTasks.add(new DeltaTask(this, reader, dc, pm, batchSize, start, list)); + myTasks.add(new DeltaTask(config, reader, dc, pm, batchSize, start, list)); } + final Executor executor = config.getExecutor(); final List errors = Collections .synchronizedList(new ArrayList()); if (executor instanceof ExecutorService) { @@ -1269,8 +879,8 @@ public class PackWriter { private TemporaryBuffer.Heap delta(final ObjectToPack otp) throws IOException { - DeltaIndex index = new DeltaIndex(buffer(reader, otp.getDeltaBaseId())); - byte[] res = buffer(reader, otp); + DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId())); + byte[] res = buffer(otp); // We never would have proposed this pair if the delta would be // larger than the unpacked version of the object. So using it @@ -1281,7 +891,12 @@ public class PackWriter { return delta; } - byte[] buffer(ObjectReader or, AnyObjectId objId) throws IOException { + private byte[] buffer(AnyObjectId objId) throws IOException { + return buffer(config, reader, objId); + } + + static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId) + throws IOException { ObjectLoader ldr = or.open(objId); if (!ldr.isLarge()) return ldr.getCachedBytes(); @@ -1294,7 +909,7 @@ public class PackWriter { // If it really is too big to work with, abort out now. 
// long sz = ldr.getSize(); - if (getBigFileThreshold() <= sz || Integer.MAX_VALUE < sz) + if (config.getBigFileThreshold() <= sz || Integer.MAX_VALUE < sz) throw new LargeObjectException(objId.copy()); // Its considered to be large by the loader, but we really @@ -1321,7 +936,7 @@ public class PackWriter { private Deflater deflater() { if (myDeflater == null) - myDeflater = new Deflater(compressionLevel); + myDeflater = new Deflater(config.getCompressionLevel()); return myDeflater; } @@ -1477,7 +1092,7 @@ public class PackWriter { otp.clearDeltaBase(); otp.clearReuseAsIs(); } - } else if (nFmt == PACK_WHOLE && reuseObjects) { + } else if (nFmt == PACK_WHOLE && config.isReuseObjects()) { otp.clearDeltaBase(); otp.setReuseAsIs(); otp.setWeight(nWeight); diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/ThreadSafeDeltaCache.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/ThreadSafeDeltaCache.java index 141289190..2492a05e1 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/ThreadSafeDeltaCache.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/ThreadSafeDeltaCache.java @@ -48,8 +48,8 @@ import java.util.concurrent.locks.ReentrantLock; class ThreadSafeDeltaCache extends DeltaCache { private final ReentrantLock lock; - ThreadSafeDeltaCache(PackWriter pw) { - super(pw); + ThreadSafeDeltaCache(PackConfig pc) { + super(pc); lock = new ReentrantLock(); } From bb99ec0aa00aab6d9f60e4d35c93d20bd614e4dc Mon Sep 17 00:00:00 2001 From: "Shawn O. Pearce" Date: Wed, 28 Jul 2010 10:52:36 -0700 Subject: [PATCH 3/5] Simplify UploadPack use of options during writing We only use these variables once, so just put them at the proper use site and avoid assigning the local variable. The code is a bit shorter and the intent is a little bit more clear. Change-Id: I70d120fb149b612ac93055ea39bc053b8d90a5db Signed-off-by: Shawn O. 
Pearce --- .../src/org/eclipse/jgit/transport/UploadPack.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java b/org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java index 02ce251be..e7338598c 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java @@ -548,8 +548,6 @@ public class UploadPack { } private void sendPack() throws IOException { - final boolean thin = options.contains(OPTION_THIN_PACK); - final boolean progress = !options.contains(OPTION_NO_PROGRESS); final boolean sideband = options.contains(OPTION_SIDE_BAND) || options.contains(OPTION_SIDE_BAND_64K); @@ -563,7 +561,7 @@ public class UploadPack { packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA, bufsz, rawOut); - if (progress) + if (!options.contains(OPTION_NO_PROGRESS)) pm = new SideBandProgressMonitor(new SideBandOutputStream( SideBandOutputStream.CH_PROGRESS, bufsz, rawOut)); } @@ -571,7 +569,7 @@ public class UploadPack { final PackWriter pw = new PackWriter(db, walk.getObjectReader()); try { pw.setDeltaBaseAsOffset(options.contains(OPTION_OFS_DELTA)); - pw.setThin(thin); + pw.setThin(options.contains(OPTION_THIN_PACK)); pw.preparePack(pm, wantAll, commonBase); if (options.contains(OPTION_INCLUDE_TAG)) { for (final Ref r : refs.values()) { From 9fbce904e6471b49668457caa4c0c2d4131a0fb5 Mon Sep 17 00:00:00 2001 From: "Shawn O. Pearce" Date: Wed, 28 Jul 2010 10:48:53 -0700 Subject: [PATCH 4/5] Pass PackConfig down to PackWriter when packing When we are creating a pack the higher level application should be able to override the PackConfig used, allowing it to control the number of threads used or how much memory is allocated per writer. Change-Id: I47795987bb0d161d3642082acc2f617d7cb28d8c Signed-off-by: Shawn O. 
Pearce --- .../org/eclipse/jgit/pgm/CLIText.properties | 3 ++ .../src/org/eclipse/jgit/pgm/CLIText.java | 1 + .../src/org/eclipse/jgit/pgm/Daemon.java | 37 ++++++++++++++++++- .../transport/BasePackPushConnection.java | 3 +- .../eclipse/jgit/transport/BundleWriter.java | 22 ++++++++++- .../org/eclipse/jgit/transport/Daemon.java | 15 ++++++++ .../org/eclipse/jgit/transport/Transport.java | 30 +++++++++++++++ .../eclipse/jgit/transport/UploadPack.java | 20 +++++++++- .../jgit/transport/WalkPushConnection.java | 24 +++++++----- 9 files changed, 140 insertions(+), 15 deletions(-) diff --git a/org.eclipse.jgit.pgm/resources/org/eclipse/jgit/pgm/CLIText.properties b/org.eclipse.jgit.pgm/resources/org/eclipse/jgit/pgm/CLIText.properties index e879d6b60..2fff6d463 100644 --- a/org.eclipse.jgit.pgm/resources/org/eclipse/jgit/pgm/CLIText.properties +++ b/org.eclipse.jgit.pgm/resources/org/eclipse/jgit/pgm/CLIText.properties @@ -14,6 +14,7 @@ branchCreatedFrom =branch: Created from {0} branchIsNotAnAncestorOfYourCurrentHEAD=The branch '{0}' is not an ancestor of your current HEAD.\nIf you are sure you want to delete it, run 'jgit branch -D {0}'. branchNotFound=branch '{0}' not found. 
cacheTreePathInfo="{0}": {1} entries, {2} children +configFileNotFound=configuration file {0} not found cannotBeRenamed={0} cannot be renamed cannotChekoutNoHeadsAdvertisedByRemote=cannot checkout; no HEAD advertised by remote cannotCreateCommand=Cannot create command {0} @@ -61,6 +62,7 @@ metaVar_bucket=BUCKET metaVar_command=command metaVar_commitOrTag=COMMIT|TAG metaVar_commitish=commit-ish +metaVar_configFile=FILE metaVar_connProp=conn.prop metaVar_directory=DIRECTORY metaVar_file=FILE @@ -138,6 +140,7 @@ usage_approveDestructionOfRepository=approve destruction of repository usage_beMoreVerbose=be more verbose usage_beVerbose=be verbose usage_cloneRepositoryIntoNewDir=Clone a repository into a new directory +usage_configFile=configuration file usage_configureTheServiceInDaemonServicename=configure the service in daemon.servicename usage_deleteBranchEvenIfNotMerged=delete branch (even if not merged) usage_deleteFullyMergedBranch=delete fully merged branch diff --git a/org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/CLIText.java b/org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/CLIText.java index bae895cc7..14dcb1f50 100644 --- a/org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/CLIText.java +++ b/org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/CLIText.java @@ -67,6 +67,7 @@ public class CLIText extends TranslationBundle { /***/ public String branchIsNotAnAncestorOfYourCurrentHEAD; /***/ public String branchNotFound; /***/ public String cacheTreePathInfo; + /***/ public String configFileNotFound; /***/ public String cannotBeRenamed; /***/ public String cannotChekoutNoHeadsAdvertisedByRemote; /***/ public String cannotCreateCommand; diff --git a/org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/Daemon.java b/org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/Daemon.java index f015a9e7b..3cca87abe 100644 --- a/org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/Daemon.java +++ b/org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/Daemon.java @@ -48,13 +48,22 @@ import java.net.InetSocketAddress; 
import java.text.MessageFormat; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.Executors; +import org.eclipse.jgit.storage.file.FileBasedConfig; +import org.eclipse.jgit.storage.file.WindowCache; +import org.eclipse.jgit.storage.file.WindowCacheConfig; +import org.eclipse.jgit.storage.pack.PackConfig; +import org.eclipse.jgit.transport.DaemonService; +import org.eclipse.jgit.util.FS; import org.kohsuke.args4j.Argument; import org.kohsuke.args4j.Option; -import org.eclipse.jgit.transport.DaemonService; @Command(common = true, usage = "usage_exportRepositoriesOverGit") class Daemon extends TextBuiltin { + @Option(name = "--config-file", metaVar = "metaVar_configFile", usage = "usage_configFile") + File configFile; + @Option(name = "--port", metaVar = "metaVar_port", usage = "usage_portNumberToListenOn") int port = org.eclipse.jgit.transport.Daemon.DEFAULT_PORT; @@ -89,12 +98,38 @@ class Daemon extends TextBuiltin { @Override protected void run() throws Exception { + PackConfig packConfig = new PackConfig(); + + if (configFile != null) { + if (!configFile.exists()) { + throw die(MessageFormat.format( + CLIText.get().configFileNotFound, // + configFile.getAbsolutePath())); + } + + FileBasedConfig cfg = new FileBasedConfig(configFile, FS.DETECTED); + cfg.load(); + + WindowCacheConfig wcc = new WindowCacheConfig(); + wcc.fromConfig(cfg); + WindowCache.reconfigure(wcc); + + packConfig.fromConfig(cfg); + } + + int threads = packConfig.getThreads(); + if (threads <= 0) + threads = Runtime.getRuntime().availableProcessors(); + if (1 < threads) + packConfig.setExecutor(Executors.newFixedThreadPool(threads)); + final org.eclipse.jgit.transport.Daemon d; d = new org.eclipse.jgit.transport.Daemon( host != null ? 
new InetSocketAddress(host, port) : new InetSocketAddress(port)); d.setExportAll(exportAll); + d.setPackConfig(packConfig); if (0 <= timeout) d.setTimeout(timeout); diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/transport/BasePackPushConnection.java b/org.eclipse.jgit/src/org/eclipse/jgit/transport/BasePackPushConnection.java index 297105d46..0838f29c1 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/transport/BasePackPushConnection.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/transport/BasePackPushConnection.java @@ -231,7 +231,8 @@ class BasePackPushConnection extends BasePackConnection implements List newObjects = new ArrayList(refUpdates.size()); final long start; - final PackWriter writer = new PackWriter(local); + final PackWriter writer = new PackWriter(transport.getPackConfig(), + local.newObjectReader()); try { for (final Ref r : getRefs()) diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/transport/BundleWriter.java b/org.eclipse.jgit/src/org/eclipse/jgit/transport/BundleWriter.java index 79fa58c36..b513412da 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/transport/BundleWriter.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/transport/BundleWriter.java @@ -61,6 +61,7 @@ import org.eclipse.jgit.lib.ProgressMonitor; import org.eclipse.jgit.lib.Ref; import org.eclipse.jgit.lib.Repository; import org.eclipse.jgit.revwalk.RevCommit; +import org.eclipse.jgit.storage.pack.PackConfig; import org.eclipse.jgit.storage.pack.PackWriter; /** @@ -81,12 +82,14 @@ import org.eclipse.jgit.storage.pack.PackWriter; * overall bundle size. */ public class BundleWriter { - private final PackWriter packWriter; + private final Repository db; private final Map include; private final Set assume; + private PackConfig packConfig; + /** * Create a writer for a bundle. * @@ -94,11 +97,22 @@ public class BundleWriter { * repository where objects are stored. 
*/ public BundleWriter(final Repository repo) { - packWriter = new PackWriter(repo); + db = repo; include = new TreeMap(); assume = new HashSet(); } + /** + * Set the configuration used by the pack generator. + * + * @param pc + * configuration controlling packing parameters. If null the + * source repository's settings will be used. + */ + public void setPackConfig(PackConfig pc) { + this.packConfig = pc; + } + /** * Include an object (and everything reachable from it) in the bundle. * @@ -166,6 +180,10 @@ public class BundleWriter { */ public void writeBundle(ProgressMonitor monitor, OutputStream os) throws IOException { + PackConfig pc = packConfig; + if (pc == null) + pc = new PackConfig(db); + PackWriter packWriter = new PackWriter(pc, db.newObjectReader()); try { final HashSet inc = new HashSet(); final HashSet exc = new HashSet(); diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/transport/Daemon.java b/org.eclipse.jgit/src/org/eclipse/jgit/transport/Daemon.java index aa2e2521c..0bc5fb3a2 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/transport/Daemon.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/transport/Daemon.java @@ -63,6 +63,7 @@ import org.eclipse.jgit.lib.PersonIdent; import org.eclipse.jgit.lib.Repository; import org.eclipse.jgit.lib.RepositoryCache; import org.eclipse.jgit.lib.RepositoryCache.FileKey; +import org.eclipse.jgit.storage.pack.PackConfig; import org.eclipse.jgit.util.FS; /** Basic daemon for the anonymous git:// transport protocol. */ @@ -90,6 +91,8 @@ public class Daemon { private int timeout; + private PackConfig packConfig; + /** Configure a daemon to listen on any available network port. 
*/ public Daemon() { this(null); @@ -120,6 +123,7 @@ public class Daemon { final UploadPack rp = new UploadPack(db); final InputStream in = dc.getInputStream(); rp.setTimeout(Daemon.this.getTimeout()); + rp.setPackConfig(Daemon.this.packConfig); rp.upload(in, dc.getOutputStream(), null); } }, new DaemonService("receive-pack", "receivepack") { @@ -242,6 +246,17 @@ public class Daemon { timeout = seconds; } + /** + * Set the configuration used by the pack generator. + * + * @param pc + * configuration controlling packing parameters. If null the + * source repository's settings will be used. + */ + public void setPackConfig(PackConfig pc) { + this.packConfig = pc; + } + /** * Start this daemon on a background thread. * diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/transport/Transport.java b/org.eclipse.jgit/src/org/eclipse/jgit/transport/Transport.java index a8e47afd3..500cf0cff 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/transport/Transport.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/transport/Transport.java @@ -66,6 +66,7 @@ import org.eclipse.jgit.lib.ProgressMonitor; import org.eclipse.jgit.lib.Ref; import org.eclipse.jgit.lib.Repository; import org.eclipse.jgit.lib.TransferConfig; +import org.eclipse.jgit.storage.pack.PackConfig; import org.eclipse.jgit.util.FS; /** @@ -554,6 +555,9 @@ public abstract class Transport { /** Timeout in seconds to wait before aborting an IO read or write. */ private int timeout; + /** Pack configuration used by this transport to make pack file. */ + private PackConfig packConfig; + /** * Create a new transport instance. * @@ -791,6 +795,32 @@ public abstract class Transport { timeout = seconds; } + /** + * Get the configuration used by the pack generator to make packs. + * + * If {@link #setPackConfig(PackConfig)} was previously given null a new + * PackConfig is created on demand by this method using the source + * repository's settings. + * + * @return the pack configuration. Never null. 
+ */ + public PackConfig getPackConfig() { + if (packConfig == null) + packConfig = new PackConfig(local); + return packConfig; + } + + /** + * Set the configuration used by the pack generator. + * + * @param pc + * configuration controlling packing parameters. If null the + * source repository's settings will be used. + */ + public void setPackConfig(PackConfig pc) { + packConfig = pc; + } + /** * Fetch objects and refs from the remote repository to the local one. *

diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java b/org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java index e7338598c..16d56df66 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java @@ -69,6 +69,7 @@ import org.eclipse.jgit.revwalk.RevFlagSet; import org.eclipse.jgit.revwalk.RevObject; import org.eclipse.jgit.revwalk.RevTag; import org.eclipse.jgit.revwalk.RevWalk; +import org.eclipse.jgit.storage.pack.PackConfig; import org.eclipse.jgit.storage.pack.PackWriter; import org.eclipse.jgit.transport.BasePackFetchConnection.MultiAck; import org.eclipse.jgit.transport.RefAdvertiser.PacketLineOutRefAdvertiser; @@ -102,6 +103,9 @@ public class UploadPack { /** Revision traversal support over {@link #db}. */ private final RevWalk walk; + /** Configuration to pass into the PackWriter. */ + private PackConfig packConfig; + /** Timeout in seconds to wait for client interaction. */ private int timeout; @@ -258,6 +262,17 @@ public class UploadPack { this.refFilter = refFilter != null ? refFilter : RefFilter.DEFAULT; } + /** + * Set the configuration used by the pack generator. + * + * @param pc + * configuration controlling packing parameters. If null the + * source repository's settings will be used. + */ + public void setPackConfig(PackConfig pc) { + this.packConfig = pc; + } + /** * Execute the upload task on the socket. 
* @@ -566,7 +581,10 @@ public class UploadPack { SideBandOutputStream.CH_PROGRESS, bufsz, rawOut)); } - final PackWriter pw = new PackWriter(db, walk.getObjectReader()); + PackConfig cfg = packConfig; + if (cfg == null) + cfg = new PackConfig(db); + final PackWriter pw = new PackWriter(cfg, walk.getObjectReader()); try { pw.setDeltaBaseAsOffset(options.contains(OPTION_OFS_DELTA)); pw.setThin(options.contains(OPTION_THIN_PACK)); diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/transport/WalkPushConnection.java b/org.eclipse.jgit/src/org/eclipse/jgit/transport/WalkPushConnection.java index bbc918f25..9ce0ec1b3 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/transport/WalkPushConnection.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/transport/WalkPushConnection.java @@ -103,6 +103,9 @@ class WalkPushConnection extends BaseConnection implements PushConnection { /** Database connection to the remote repository. */ private final WalkRemoteObjectDatabase dest; + /** The configured transport we were constructed by. */ + private final Transport transport; + /** * Packs already known to reside in the remote repository. *

@@ -123,9 +126,9 @@ class WalkPushConnection extends BaseConnection implements PushConnection { WalkPushConnection(final WalkTransport walkTransport, final WalkRemoteObjectDatabase w) { - Transport t = (Transport)walkTransport; - local = t.local; - uri = t.getURI(); + transport = (Transport) walkTransport; + local = transport.local; + uri = transport.getURI(); dest = w; } @@ -209,7 +212,8 @@ class WalkPushConnection extends BaseConnection implements PushConnection { String pathPack = null; String pathIdx = null; - final PackWriter pw = new PackWriter(local); + final PackWriter writer = new PackWriter(transport.getPackConfig(), + local.newObjectReader()); try { final List need = new ArrayList(); final List have = new ArrayList(); @@ -220,20 +224,20 @@ class WalkPushConnection extends BaseConnection implements PushConnection { if (r.getPeeledObjectId() != null) have.add(r.getPeeledObjectId()); } - pw.preparePack(monitor, need, have); + writer.preparePack(monitor, need, have); // We don't have to continue further if the pack will // be an empty pack, as the remote has all objects it // needs to complete this change. 
// - if (pw.getObjectsNumber() == 0) + if (writer.getObjectsNumber() == 0) return; packNames = new LinkedHashMap(); for (final String n : dest.getPackNames()) packNames.put(n, n); - final String base = "pack-" + pw.computeName().name(); + final String base = "pack-" + writer.computeName().name(); final String packName = base + ".pack"; pathPack = "pack/" + packName; pathIdx = "pack/" + base + ".idx"; @@ -254,7 +258,7 @@ class WalkPushConnection extends BaseConnection implements PushConnection { OutputStream os = dest.writeFile(pathPack, monitor, wt + "..pack"); try { os = new BufferedOutputStream(os); - pw.writePack(monitor, monitor, os); + writer.writePack(monitor, monitor, os); } finally { os.close(); } @@ -262,7 +266,7 @@ class WalkPushConnection extends BaseConnection implements PushConnection { os = dest.writeFile(pathIdx, monitor, wt + "..idx"); try { os = new BufferedOutputStream(os); - pw.writeIndex(os); + writer.writeIndex(os); } finally { os.close(); } @@ -282,7 +286,7 @@ class WalkPushConnection extends BaseConnection implements PushConnection { throw new TransportException(uri, JGitText.get().cannotStoreObjects, err); } finally { - pw.release(); + writer.release(); } } From 5f5da8b1d47db5a390ef409095c76d5290441620 Mon Sep 17 00:00:00 2001 From: "Shawn O. Pearce" Date: Wed, 28 Jul 2010 11:23:32 -0700 Subject: [PATCH 5/5] Enable configuration of non-standard pack settings For daemons we might want to disable delta compression entirely, or in some strange case an administrator might need to turn off delta reuse. Expose these normally internal pack settings through the pack configuration section. Change-Id: I39bfefee8384c864cc04ffac724f197240c8a11a Signed-off-by: Shawn O. 
Pearce --- .../src/org/eclipse/jgit/storage/pack/PackConfig.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java index 355ed8a3f..9bda76d96 100644 --- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java +++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java @@ -611,5 +611,11 @@ public class PackConfig { setIndexVersion(rc.getInt("pack", "indexversion", getIndexVersion())); setBigFileThreshold(rc.getLong("core", "bigfilethreshold", getBigFileThreshold())); setThreads(rc.getInt("pack", "threads", getThreads())); + + // These variables aren't standardized + // + setReuseDeltas(rc.getBoolean("pack", "reusedeltas", isReuseDeltas())); + setReuseObjects(rc.getBoolean("pack", "reuseobjects", isReuseObjects())); + setDeltaCompress(rc.getBoolean("pack", "deltacompression", isDeltaCompress())); } }