
Merge changes I39bfefee,I47795987,I70d120fb,I58cc5e01,I96bee7b9

* changes:
  Enable configuration of non-standard pack settings
  Pass PackConfig down to PackWriter when packing
  Simplify UploadPack use of options during writing
  Move PackWriter configuration to PackConfig
  Allow PackWriter callers to manage the thread pool
Branch: stable-0.9
Author: Shawn O. Pearce, committed by Code Review
Commit: 8e9cc826e9
Changed files (lines changed in parentheses):

 1. org.eclipse.jgit.pgm/resources/org/eclipse/jgit/pgm/CLIText.properties (3)
 2. org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/CLIText.java (1)
 3. org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/Daemon.java (37)
 4. org.eclipse.jgit.test/tst/org/eclipse/jgit/storage/file/PackWriterTest.java (46)
 5. org.eclipse.jgit/src/org/eclipse/jgit/lib/Config.java (15)
 6. org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaCache.java (6)
 7. org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java (88)
 8. org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaWindow.java (16)
 9. org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java (592)
10. org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java (556)
11. org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/ThreadSafeDeltaCache.java (4)
12. org.eclipse.jgit/src/org/eclipse/jgit/transport/BasePackPushConnection.java (3)
13. org.eclipse.jgit/src/org/eclipse/jgit/transport/BundleWriter.java (22)
14. org.eclipse.jgit/src/org/eclipse/jgit/transport/Daemon.java (15)
15. org.eclipse.jgit/src/org/eclipse/jgit/transport/Transport.java (30)
16. org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java (26)
17. org.eclipse.jgit/src/org/eclipse/jgit/transport/WalkPushConnection.java (24)

org.eclipse.jgit.pgm/resources/org/eclipse/jgit/pgm/CLIText.properties (3 lines changed)

@@ -14,6 +14,7 @@ branchCreatedFrom=branch: Created from {0}
 branchIsNotAnAncestorOfYourCurrentHEAD=The branch '{0}' is not an ancestor of your current HEAD.\nIf you are sure you want to delete it, run 'jgit branch -D {0}'.
 branchNotFound=branch '{0}' not found.
 cacheTreePathInfo="{0}": {1} entries, {2} children
+configFileNotFound=configuration file {0} not found
 cannotBeRenamed={0} cannot be renamed
 cannotChekoutNoHeadsAdvertisedByRemote=cannot checkout; no HEAD advertised by remote
 cannotCreateCommand=Cannot create command {0}
@@ -61,6 +62,7 @@ metaVar_bucket=BUCKET
 metaVar_command=command
 metaVar_commitOrTag=COMMIT|TAG
 metaVar_commitish=commit-ish
+metaVar_configFile=FILE
 metaVar_connProp=conn.prop
 metaVar_directory=DIRECTORY
 metaVar_file=FILE
@@ -138,6 +140,7 @@ usage_approveDestructionOfRepository=approve destruction of repository
 usage_beMoreVerbose=be more verbose
 usage_beVerbose=be verbose
 usage_cloneRepositoryIntoNewDir=Clone a repository into a new directory
+usage_configFile=configuration file
 usage_configureTheServiceInDaemonServicename=configure the service in daemon.servicename
 usage_deleteBranchEvenIfNotMerged=delete branch (even if not merged)
 usage_deleteFullyMergedBranch=delete fully merged branch

org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/CLIText.java (1 line changed)

@@ -67,6 +67,7 @@ public class CLIText extends TranslationBundle {
 	/***/ public String branchIsNotAnAncestorOfYourCurrentHEAD;
 	/***/ public String branchNotFound;
 	/***/ public String cacheTreePathInfo;
+	/***/ public String configFileNotFound;
 	/***/ public String cannotBeRenamed;
 	/***/ public String cannotChekoutNoHeadsAdvertisedByRemote;
 	/***/ public String cannotCreateCommand;

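The pairing above is JGit's translation-bundle convention: every key added to CLIText.properties needs a matching public String field of the same name in CLIText.java, and the bundle loader fills the fields from the properties file. A generic, hedged sketch of that fill-by-reflection idea (not JGit's TranslationBundle itself, and it assumes a CLIText.properties is visible on the classpath):

    import java.lang.reflect.Field;
    import java.util.ResourceBundle;

    // Each public String field is assigned the bundle value of the same key.
    public class BundleSketch {
        public String configFileNotFound; // matches the new properties key

        public static void main(String[] args) throws Exception {
            ResourceBundle rb = ResourceBundle.getBundle("CLIText");
            BundleSketch b = new BundleSketch();
            for (Field f : BundleSketch.class.getFields())
                f.set(b, rb.getString(f.getName()));
            System.out.println(b.configFileNotFound);
        }
    }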
org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/Daemon.java (37 lines changed)

@@ -48,13 +48,22 @@ import java.net.InetSocketAddress;
 import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.Executors;
 
+import org.eclipse.jgit.storage.file.FileBasedConfig;
+import org.eclipse.jgit.storage.file.WindowCache;
+import org.eclipse.jgit.storage.file.WindowCacheConfig;
+import org.eclipse.jgit.storage.pack.PackConfig;
+import org.eclipse.jgit.transport.DaemonService;
+import org.eclipse.jgit.util.FS;
 import org.kohsuke.args4j.Argument;
 import org.kohsuke.args4j.Option;
-import org.eclipse.jgit.transport.DaemonService;
 
 @Command(common = true, usage = "usage_exportRepositoriesOverGit")
 class Daemon extends TextBuiltin {
+	@Option(name = "--config-file", metaVar = "metaVar_configFile", usage = "usage_configFile")
+	File configFile;
+
 	@Option(name = "--port", metaVar = "metaVar_port", usage = "usage_portNumberToListenOn")
 	int port = org.eclipse.jgit.transport.Daemon.DEFAULT_PORT;
@@ -89,12 +98,38 @@ class Daemon extends TextBuiltin {
 	@Override
 	protected void run() throws Exception {
+		PackConfig packConfig = new PackConfig();
+
+		if (configFile != null) {
+			if (!configFile.exists()) {
+				throw die(MessageFormat.format(
+						CLIText.get().configFileNotFound, //
+						configFile.getAbsolutePath()));
+			}
+
+			FileBasedConfig cfg = new FileBasedConfig(configFile, FS.DETECTED);
+			cfg.load();
+
+			WindowCacheConfig wcc = new WindowCacheConfig();
+			wcc.fromConfig(cfg);
+			WindowCache.reconfigure(wcc);
+
+			packConfig.fromConfig(cfg);
+		}
+
+		int threads = packConfig.getThreads();
+		if (threads <= 0)
+			threads = Runtime.getRuntime().availableProcessors();
+		if (1 < threads)
+			packConfig.setExecutor(Executors.newFixedThreadPool(threads));
+
 		final org.eclipse.jgit.transport.Daemon d;
 
 		d = new org.eclipse.jgit.transport.Daemon(
 				host != null ? new InetSocketAddress(host, port)
 						: new InetSocketAddress(port));
 		d.setExportAll(exportAll);
+		d.setPackConfig(packConfig);
 		if (0 <= timeout)
 			d.setTimeout(timeout);

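The run() body above is the whole feature in miniature: load a plain config file, apply its settings to the process-wide WindowCache, fold its pack.* settings into a PackConfig, and pre-size a shared thread pool before the daemon serves its first client. A condensed sketch of the same sequence for an embedding application (the config path and the use of the default port are assumptions, not part of this commit):

    import java.io.File;
    import java.net.InetSocketAddress;
    import java.util.concurrent.Executors;

    import org.eclipse.jgit.storage.file.FileBasedConfig;
    import org.eclipse.jgit.storage.file.WindowCache;
    import org.eclipse.jgit.storage.file.WindowCacheConfig;
    import org.eclipse.jgit.storage.pack.PackConfig;
    import org.eclipse.jgit.util.FS;

    public class EmbeddedDaemon {
        public static void main(String[] args) throws Exception {
            FileBasedConfig cfg = new FileBasedConfig(
                    new File("/etc/jgit-daemon.config"), FS.DETECTED); // hypothetical path
            cfg.load();

            // Process-wide read cache first, then the pack settings.
            WindowCacheConfig wcc = new WindowCacheConfig();
            wcc.fromConfig(cfg);
            WindowCache.reconfigure(wcc);

            PackConfig packConfig = new PackConfig();
            packConfig.fromConfig(cfg);

            int threads = packConfig.getThreads();
            if (threads <= 0)
                threads = Runtime.getRuntime().availableProcessors();
            if (1 < threads)
                packConfig.setExecutor(Executors.newFixedThreadPool(threads));

            org.eclipse.jgit.transport.Daemon d = new org.eclipse.jgit.transport.Daemon(
                    new InetSocketAddress(9418));
            d.setExportAll(true); // serve every repository under the export root
            d.setPackConfig(packConfig);
            d.start();
        }
    }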
org.eclipse.jgit.test/tst/org/eclipse/jgit/storage/file/PackWriterTest.java (46 lines changed)

@@ -66,6 +66,7 @@ import org.eclipse.jgit.lib.TextProgressMonitor;
 import org.eclipse.jgit.revwalk.RevObject;
 import org.eclipse.jgit.revwalk.RevWalk;
 import org.eclipse.jgit.storage.file.PackIndex.MutableEntry;
+import org.eclipse.jgit.storage.pack.PackConfig;
 import org.eclipse.jgit.storage.pack.PackWriter;
 import org.eclipse.jgit.transport.IndexPack;
 import org.eclipse.jgit.util.JGitTestUtil;
@@ -78,6 +79,8 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 	private static final List<RevObject> EMPTY_LIST_REVS = Collections
 			.<RevObject> emptyList();
 
+	private PackConfig config;
+
 	private PackWriter writer;
 
 	private ByteArrayOutputStream os;
@@ -96,16 +99,23 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 		packBase = new File(trash, "tmp_pack");
 		packFile = new File(trash, "tmp_pack.pack");
 		indexFile = new File(trash, "tmp_pack.idx");
-		writer = new PackWriter(db);
+		config = new PackConfig(db);
 	}
 
+	public void tearDown() throws Exception {
+		if (writer != null)
+			writer.release();
+		super.tearDown();
+	}
+
 	/**
 	 * Test constructor for exceptions, default settings, initialization.
 	 */
 	public void testContructor() {
+		writer = new PackWriter(config, db.newObjectReader());
 		assertEquals(false, writer.isDeltaBaseAsOffset());
-		assertEquals(true, writer.isReuseDeltas());
-		assertEquals(true, writer.isReuseObjects());
+		assertEquals(true, config.isReuseDeltas());
+		assertEquals(true, config.isReuseObjects());
 		assertEquals(0, writer.getObjectsNumber());
 	}
 
@@ -113,13 +123,17 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 	 * Change default settings and verify them.
 	 */
 	public void testModifySettings() {
+		config.setReuseDeltas(false);
+		config.setReuseObjects(false);
+		config.setDeltaBaseAsOffset(false);
+		assertEquals(false, config.isReuseDeltas());
+		assertEquals(false, config.isReuseObjects());
+		assertEquals(false, config.isDeltaBaseAsOffset());
+
+		writer = new PackWriter(config, db.newObjectReader());
 		writer.setDeltaBaseAsOffset(true);
-		writer.setReuseDeltas(false);
-		writer.setReuseObjects(false);
-
 		assertEquals(true, writer.isDeltaBaseAsOffset());
-		assertEquals(false, writer.isReuseDeltas());
-		assertEquals(false, writer.isReuseObjects());
+		assertEquals(false, config.isDeltaBaseAsOffset());
 	}
 
@@ -188,7 +202,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 	 * @throws IOException
 	 */
 	public void testWritePack1() throws IOException {
-		writer.setReuseDeltas(false);
+		config.setReuseDeltas(false);
 		writeVerifyPack1();
 	}
 
@@ -199,8 +213,8 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 	 * @throws IOException
 	 */
 	public void testWritePack1NoObjectReuse() throws IOException {
-		writer.setReuseDeltas(false);
-		writer.setReuseObjects(false);
+		config.setReuseDeltas(false);
+		config.setReuseObjects(false);
 		writeVerifyPack1();
 	}
 
@@ -231,7 +245,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 	 * @throws IOException
 	 */
 	public void testWritePack2DeltasReuseOffsets() throws IOException {
-		writer.setDeltaBaseAsOffset(true);
+		config.setDeltaBaseAsOffset(true);
 		writeVerifyPack2(true);
 	}
 
@@ -265,7 +279,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 	 *
 	 */
 	public void testWritePack3() throws MissingObjectException, IOException {
-		writer.setReuseDeltas(false);
+		config.setReuseDeltas(false);
 		final ObjectId forcedOrder[] = new ObjectId[] {
 				ObjectId.fromString("82c6b885ff600be425b4ea96dee75dca255b69e7"),
 				ObjectId.fromString("c59759f143fb1fe21c197981df75a7ee00290799"),
@@ -363,7 +377,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 	}
 
 	public void testWriteIndex() throws Exception {
-		writer.setIndexVersion(2);
+		config.setIndexVersion(2);
 		writeVerifyPack4(false);
 
 		// Validate that IndexPack came up with the right CRC32 value.
@@ -419,7 +433,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 	}
 
 	private void writeVerifyPack2(boolean deltaReuse) throws IOException {
-		writer.setReuseDeltas(deltaReuse);
+		config.setReuseDeltas(deltaReuse);
 		final LinkedList<ObjectId> interestings = new LinkedList<ObjectId>();
 		interestings.add(ObjectId
 				.fromString("82c6b885ff600be425b4ea96dee75dca255b69e7"));
@@ -482,6 +496,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 			final boolean ignoreMissingUninteresting)
 			throws MissingObjectException, IOException {
 		NullProgressMonitor m = NullProgressMonitor.INSTANCE;
+		writer = new PackWriter(config, db.newObjectReader());
 		writer.setThin(thin);
 		writer.setIgnoreMissingUninteresting(ignoreMissingUninteresting);
 		writer.preparePack(m, interestings, uninterestings);
@@ -493,6 +508,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
 	private void createVerifyOpenPack(final Iterator<RevObject> objectSource)
 			throws MissingObjectException, IOException {
 		NullProgressMonitor m = NullProgressMonitor.INSTANCE;
+		writer = new PackWriter(config, db.newObjectReader());
 		writer.preparePack(objectSource);
 		writer.writePack(m, m, os);
 		writer.release();

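Note the lifecycle the updated test enforces: settings go on the shared PackConfig first, the PackWriter is created only when a pack is actually written, and release() is guaranteed in tearDown(). Outside a test fixture the same guarantee is usually a try/finally; a minimal sketch of that pattern using only the API visible in this commit:

    import java.io.OutputStream;
    import java.util.Iterator;

    import org.eclipse.jgit.lib.NullProgressMonitor;
    import org.eclipse.jgit.lib.Repository;
    import org.eclipse.jgit.revwalk.RevObject;
    import org.eclipse.jgit.storage.pack.PackConfig;
    import org.eclipse.jgit.storage.pack.PackWriter;

    final class WritePackUtil {
        // Write one pack, guaranteeing release() even on failure.
        static void writePack(Repository db, PackConfig config,
                Iterator<RevObject> objects, OutputStream out) throws Exception {
            PackWriter writer = new PackWriter(config, db.newObjectReader());
            try {
                writer.preparePack(objects);
                NullProgressMonitor m = NullProgressMonitor.INSTANCE;
                writer.writePack(m, m, out);
            } finally {
                writer.release(); // frees the deflater and the reader
            }
        }
    }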
org.eclipse.jgit/src/org/eclipse/jgit/lib/Config.java (15 lines changed)

@@ -216,6 +216,21 @@ public class Config {
 					, section, name));
 	}
 
+	/**
+	 * Obtain an integer value from the configuration.
+	 *
+	 * @param section
+	 *            section the key is grouped within.
+	 * @param name
+	 *            name of the key to get.
+	 * @param defaultValue
+	 *            default value to return if no value was present.
+	 * @return an integer value from the configuration, or defaultValue.
+	 */
+	public long getLong(String section, String name, long defaultValue) {
+		return getLong(section, null, name, defaultValue);
+	}
+
 	/**
 	 * Obtain an integer value from the configuration.
 	 *

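The new overload exists so callers such as PackConfig.fromConfig() can read keys like pack.windowmemory without passing an explicit null subsection. A small usage sketch:

    import org.eclipse.jgit.lib.Config;

    public class ConfigLongExample {
        public static void main(String[] args) throws Exception {
            Config rc = new Config();
            rc.fromText("[pack]\n\twindowmemory = 1024\n");

            // New overload: no subsection argument needed.
            long limit = rc.getLong("pack", "windowmemory", 0);
            System.out.println(limit); // 1024
        }
    }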
org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaCache.java (6 lines changed)

@@ -55,9 +55,9 @@ class DeltaCache {
 
 	private long used;
 
-	DeltaCache(PackWriter pw) {
-		size = pw.getDeltaCacheSize();
-		entryLimit = pw.getDeltaCacheLimit();
+	DeltaCache(PackConfig pc) {
+		size = pc.getDeltaCacheSize();
+		entryLimit = pc.getDeltaCacheLimit();
 		queue = new ReferenceQueue<byte[]>();
 	}

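The ReferenceQueue<byte[]> seen here is the usual JVM idiom for a memory-sensitive cache: cached delta bytes are held through soft references so the collector can reclaim them under pressure, and the queue lets the cache learn how many bytes were reclaimed. A generic sketch of that idiom (illustrative only, not JGit's exact bookkeeping):

    import java.lang.ref.ReferenceQueue;
    import java.lang.ref.SoftReference;

    final class SoftCacheSketch {
        private final ReferenceQueue<byte[]> queue = new ReferenceQueue<byte[]>();

        private long used;

        static final class Entry extends SoftReference<byte[]> {
            final int size;

            Entry(byte[] data, ReferenceQueue<byte[]> q) {
                super(data, q);
                size = data.length;
            }
        }

        Entry cache(byte[] data) {
            used += data.length;
            return new Entry(data, queue);
        }

        // Credit back the bytes of every entry the collector has cleared.
        void checkForGarbageCollectedObjects() {
            Entry e;
            while ((e = (Entry) queue.poll()) != null)
                used -= e.size;
        }

        public static void main(String[] args) {
            SoftCacheSketch c = new SoftCacheSketch();
            c.cache(new byte[64]);
            c.checkForGarbageCollectedObjects();
            System.out.println(c.used + " bytes accounted for");
        }
    }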
org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java (new file, 88 lines)

@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2010, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ *   names of its contributors may be used to endorse or promote
+ *   products derived from this software without specific prior
+ *   written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.pack;
+
+import java.util.concurrent.Callable;
+
+import org.eclipse.jgit.lib.ObjectReader;
+import org.eclipse.jgit.lib.ProgressMonitor;
+
+final class DeltaTask implements Callable<Object> {
+	private final PackConfig config;
+
+	private final ObjectReader templateReader;
+
+	private final DeltaCache dc;
+
+	private final ProgressMonitor pm;
+
+	private final int batchSize;
+
+	private final int start;
+
+	private final ObjectToPack[] list;
+
+	DeltaTask(PackConfig config, ObjectReader reader, DeltaCache dc,
+			ProgressMonitor pm, int batchSize, int start, ObjectToPack[] list) {
+		this.config = config;
+		this.templateReader = reader;
+		this.dc = dc;
+		this.pm = pm;
+		this.batchSize = batchSize;
+		this.start = start;
+		this.list = list;
+	}
+
+	public Object call() throws Exception {
+		final ObjectReader or = templateReader.newReader();
+		try {
+			DeltaWindow dw;
+			dw = new DeltaWindow(config, dc, or);
+			dw.search(pm, list, start, batchSize);
+		} finally {
+			or.release();
+		}
+		return null;
+	}
+}

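DeltaTask is deliberately small: a Callable that clones the template ObjectReader, searches one disjoint [start, start + batchSize) slice of the sorted object list, and releases its reader. That shape is what lets PackWriter run batches on any Executor. A toy illustration of the same fan-out pattern with plain JDK types (nothing here is JGit API):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class BatchFanOut {
        public static void main(String[] args) throws Exception {
            final int[] data = new int[1000];
            final int threads = Runtime.getRuntime().availableProcessors();
            final int batch = Math.max(1, data.length / (threads * 2));

            ExecutorService pool = Executors.newFixedThreadPool(threads);
            List<Future<Object>> futures = new ArrayList<Future<Object>>();
            for (int start = 0; start < data.length; start += batch) {
                final int s = start;
                final int n = Math.min(batch, data.length - s);
                // Like DeltaTask: each callable owns a disjoint [s, s + n) slice.
                futures.add(pool.submit(new Callable<Object>() {
                    public Object call() {
                        for (int i = s; i < s + n; i++)
                            data[i] = i * i;
                        return null;
                    }
                }));
            }
            for (Future<Object> f : futures)
                f.get(); // propagates any worker exception
            pool.shutdown();
        }
    }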
org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaWindow.java (16 lines changed)

@@ -60,7 +60,7 @@ class DeltaWindow {
 
 	private static final int NEXT_SRC = 1;
 
-	private final PackWriter writer;
+	private final PackConfig config;
 
 	private final DeltaCache deltaCache;
 
@@ -101,8 +101,8 @@ class DeltaWindow {
 	/** Used to compress cached deltas. */
 	private Deflater deflater;
 
-	DeltaWindow(PackWriter pw, DeltaCache dc, ObjectReader or) {
-		writer = pw;
+	DeltaWindow(PackConfig pc, DeltaCache dc, ObjectReader or) {
+		config = pc;
 		deltaCache = dc;
 		reader = or;
 
@@ -117,12 +117,12 @@ class DeltaWindow {
 		// PackWriter has a minimum of 2 for the window size, but then
 		// users might complain that JGit is creating a bigger pack file.
 		//
-		window = new DeltaWindowEntry[pw.getDeltaSearchWindowSize() + 1];
+		window = new DeltaWindowEntry[config.getDeltaSearchWindowSize() + 1];
 		for (int i = 0; i < window.length; i++)
 			window[i] = new DeltaWindowEntry();
 
-		maxMemory = pw.getDeltaSearchMemoryLimit();
-		maxDepth = pw.getMaxDeltaDepth();
+		maxMemory = config.getDeltaSearchMemoryLimit();
+		maxDepth = config.getMaxDeltaDepth();
 	}
 
 	void search(ProgressMonitor monitor, ObjectToPack[] toSearch, int off,
@@ -442,7 +442,7 @@ class DeltaWindow {
 			IncorrectObjectTypeException, IOException, LargeObjectException {
 		byte[] buf = ent.buffer;
 		if (buf == null) {
-			buf = writer.buffer(reader, ent.object);
+			buf = PackWriter.buffer(config, reader, ent.object);
 			if (0 < maxMemory)
 				loaded += buf.length;
 			ent.buffer = buf;
@@ -452,7 +452,7 @@ class DeltaWindow {
 	private Deflater deflater() {
 		if (deflater == null)
-			deflater = new Deflater(writer.getCompressionLevel());
+			deflater = new Deflater(config.getCompressionLevel());
 		else
 			deflater.reset();
 		return deflater;

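Because each DeltaWindow reads the window size and memory limit for itself, and every worker thread owns its own window, the effective limits scale with the thread count, exactly as the PackConfig javadoc below warns. A worked check of that arithmetic (the 100 MiB and 4-thread figures are illustrative):

    public class WindowMemoryMath {
        public static void main(String[] args) {
            long windowMemory = 100L * 1024 * 1024; // pack.windowmemory = 100 MiB
            int threads = 4;                        // pack.threads = 4

            // Each DeltaWindow applies the limit independently, so the
            // writer as a whole may hold up to threads * windowMemory bytes.
            long worstCase = threads * windowMemory;
            System.out.println(worstCase / (1024 * 1024) + " MiB"); // 400 MiB
        }
    }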
org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java (592 lines changed)

@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2010, Google Inc.
+ * Copyright (C) 2008-2010, Google Inc.
+ * Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com>
  * and other copyright owners as documented in the project's IP log.
  *
  * This program and the accompanying materials are made available
@@ -43,51 +44,578 @@
 package org.eclipse.jgit.storage.pack;
 
-import static java.util.zip.Deflater.DEFAULT_COMPRESSION;
+import java.util.concurrent.Executor;
+import java.util.zip.Deflater;
 
 import org.eclipse.jgit.lib.Config;
-import org.eclipse.jgit.lib.Config.SectionParser;
+import org.eclipse.jgit.lib.Repository;
+import org.eclipse.jgit.storage.file.PackIndexWriter;
 
-class PackConfig {
-	/** Key for {@link Config#get(SectionParser)}. */
-	static final Config.SectionParser<PackConfig> KEY = new SectionParser<PackConfig>() {
-		public PackConfig parse(final Config cfg) {
-			return new PackConfig(cfg);
-		}
-	};
-
-	final int deltaWindow;
-
-	final long deltaWindowMemory;
-
-	final int deltaDepth;
-
-	final long deltaCacheSize;
-
-	final int deltaCacheLimit;
-
-	final int compression;
-
-	final int indexVersion;
-
-	final long bigFileThreshold;
-
-	final int threads;
-
-	private PackConfig(Config rc) {
-		deltaWindow = rc.getInt("pack", "window", PackWriter.DEFAULT_DELTA_SEARCH_WINDOW_SIZE);
-		deltaWindowMemory = rc.getLong("pack", null, "windowmemory", 0);
-		deltaCacheSize = rc.getLong("pack", null, "deltacachesize", PackWriter.DEFAULT_DELTA_CACHE_SIZE);
-		deltaCacheLimit = rc.getInt("pack", "deltacachelimit", PackWriter.DEFAULT_DELTA_CACHE_LIMIT);
-		deltaDepth = rc.getInt("pack", "depth", PackWriter.DEFAULT_MAX_DELTA_DEPTH);
-		compression = compression(rc);
-		indexVersion = rc.getInt("pack", "indexversion", 2);
-		bigFileThreshold = rc.getLong("core", null, "bigfilethreshold", PackWriter.DEFAULT_BIG_FILE_THRESHOLD);
-		threads = rc.getInt("pack", "threads", 0);
-	}
-
-	private static int compression(Config rc) {
-		if (rc.getString("pack", null, "compression") != null)
-			return rc.getInt("pack", "compression", DEFAULT_COMPRESSION);
-		return rc.getInt("core", "compression", DEFAULT_COMPRESSION);
-	}
-}
+/**
+ * Configuration used by a {@link PackWriter} when constructing the stream.
+ *
+ * A configuration may be modified once created, but should not be modified
+ * while it is being used by a PackWriter. If a configuration is not modified it
+ * is safe to share the same configuration instance between multiple concurrent
+ * threads executing different PackWriters.
+ */
+public class PackConfig {
+	/**
+	 * Default value of deltas reuse option: {@value}
+	 *
+	 * @see #setReuseDeltas(boolean)
+	 */
+	public static final boolean DEFAULT_REUSE_DELTAS = true;
+
+	/**
+	 * Default value of objects reuse option: {@value}
+	 *
+	 * @see #setReuseObjects(boolean)
+	 */
+	public static final boolean DEFAULT_REUSE_OBJECTS = true;
+
+	/**
+	 * Default value of delta compress option: {@value}
+	 *
+	 * @see #setDeltaCompress(boolean)
+	 */
+	public static final boolean DEFAULT_DELTA_COMPRESS = true;
+
+	/**
+	 * Default value of delta base as offset option: {@value}
+	 *
+	 * @see #setDeltaBaseAsOffset(boolean)
+	 */
+	public static final boolean DEFAULT_DELTA_BASE_AS_OFFSET = false;
+
+	/**
+	 * Default value of maximum delta chain depth: {@value}
+	 *
+	 * @see #setMaxDeltaDepth(int)
+	 */
+	public static final int DEFAULT_MAX_DELTA_DEPTH = 50;
+
+	/**
+	 * Default window size during packing: {@value}
+	 *
+	 * @see #setDeltaSearchWindowSize(int)
+	 */
+	public static final int DEFAULT_DELTA_SEARCH_WINDOW_SIZE = 10;
+
+	/**
+	 * Default big file threshold: {@value}
+	 *
+	 * @see #setBigFileThreshold(long)
+	 */
+	public static final long DEFAULT_BIG_FILE_THRESHOLD = 50 * 1024 * 1024;
+
+	/**
+	 * Default delta cache size: {@value}
+	 *
+	 * @see #setDeltaCacheSize(long)
+	 */
+	public static final long DEFAULT_DELTA_CACHE_SIZE = 50 * 1024 * 1024;
+
+	/**
+	 * Default delta cache limit: {@value}
+	 *
+	 * @see #setDeltaCacheLimit(int)
+	 */
+	public static final int DEFAULT_DELTA_CACHE_LIMIT = 100;
+
+	/**
+	 * Default index version: {@value}
+	 *
+	 * @see #setIndexVersion(int)
+	 */
+	public static final int DEFAULT_INDEX_VERSION = 2;
+
+	private int compressionLevel = Deflater.DEFAULT_COMPRESSION;
+
+	private boolean reuseDeltas = DEFAULT_REUSE_DELTAS;
+
+	private boolean reuseObjects = DEFAULT_REUSE_OBJECTS;
+
+	private boolean deltaBaseAsOffset = DEFAULT_DELTA_BASE_AS_OFFSET;
+
+	private boolean deltaCompress = DEFAULT_DELTA_COMPRESS;
+
+	private int maxDeltaDepth = DEFAULT_MAX_DELTA_DEPTH;
+
+	private int deltaSearchWindowSize = DEFAULT_DELTA_SEARCH_WINDOW_SIZE;
+
+	private long deltaSearchMemoryLimit;
+
+	private long deltaCacheSize = DEFAULT_DELTA_CACHE_SIZE;
+
+	private int deltaCacheLimit = DEFAULT_DELTA_CACHE_LIMIT;
+
+	private long bigFileThreshold = DEFAULT_BIG_FILE_THRESHOLD;
+
+	private int threads;
+
+	private Executor executor;
+
+	private int indexVersion = DEFAULT_INDEX_VERSION;
+
+	/** Create a default configuration. */
+	public PackConfig() {
+		// Fields are initialized to defaults.
+	}
+
+	/**
+	 * Create a configuration honoring the repository's settings.
+	 *
+	 * @param db
+	 *            the repository to read settings from. The repository is not
+	 *            retained by the new configuration, instead its settings are
+	 *            copied during the constructor.
+	 */
+	public PackConfig(Repository db) {
+		fromConfig(db.getConfig());
+	}
+
+	/**
+	 * Create a configuration honoring settings in a {@link Config}.
+	 *
+	 * @param cfg
+	 *            the source to read settings from. The source is not retained
+	 *            by the new configuration, instead its settings are copied
+	 *            during the constructor.
+	 */
+	public PackConfig(Config cfg) {
+		fromConfig(cfg);
+	}
+
+	/**
+	 * Check whether to reuse deltas existing in repository.
+	 *
+	 * Default setting: {@value #DEFAULT_REUSE_DELTAS}
+	 *
+	 * @return true if object is configured to reuse deltas; false otherwise.
+	 */
+	public boolean isReuseDeltas() {
+		return reuseDeltas;
+	}
+
+	/**
+	 * Set reuse deltas configuration option for the writer.
+	 *
+	 * When enabled, writer will search for delta representation of object in
+	 * repository and use it if possible. Normally, only deltas with base to
+	 * another object existing in set of objects to pack will be used. The
+	 * exception however is thin-packs where the base object may exist on the
+	 * other side.
+	 *
+	 * When raw delta data is directly copied from a pack file, its checksum is
+	 * computed to verify the data is not corrupt.
+	 *
+	 * Default setting: {@value #DEFAULT_REUSE_DELTAS}
+	 *
+	 * @param reuseDeltas
+	 *            boolean indicating whether or not try to reuse deltas.
+	 */
+	public void setReuseDeltas(boolean reuseDeltas) {
+		this.reuseDeltas = reuseDeltas;
+	}
+
+	/**
+	 * Checks whether to reuse existing objects representation in repository.
+	 *
+	 * Default setting: {@value #DEFAULT_REUSE_OBJECTS}
+	 *
+	 * @return true if writer is configured to reuse objects representation from
+	 *         pack; false otherwise.
+	 */
+	public boolean isReuseObjects() {
+		return reuseObjects;
+	}
+
+	/**
+	 * Set reuse objects configuration option for the writer.
+	 *
+	 * If enabled, writer searches for compressed representation in a pack file.
+	 * If possible, compressed data is directly copied from such a pack file.
+	 * Data checksum is verified.
+	 *
+	 * Default setting: {@value #DEFAULT_REUSE_OBJECTS}
+	 *
+	 * @param reuseObjects
+	 *            boolean indicating whether or not writer should reuse existing
+	 *            objects representation.
+	 */
+	public void setReuseObjects(boolean reuseObjects) {
+		this.reuseObjects = reuseObjects;
+	}
+
+	/**
+	 * True if writer can use offsets to point to a delta base.
+	 *
+	 * If true the writer may choose to use an offset to point to a delta base
+	 * in the same pack, this is a newer style of reference that saves space.
+	 * False if the writer has to use the older (and more compatible style) of
+	 * storing the full ObjectId of the delta base.
+	 *
+	 * Default setting: {@value #DEFAULT_DELTA_BASE_AS_OFFSET}
+	 *
+	 * @return true if delta base is stored as an offset; false if it is stored
+	 *         as an ObjectId.
+	 */
+	public boolean isDeltaBaseAsOffset() {
+		return deltaBaseAsOffset;
+	}
+
+	/**
+	 * Set writer delta base format.
+	 *
+	 * Delta base can be written as an offset in a pack file (new approach
+	 * reducing file size) or as an object id (legacy approach, compatible with
+	 * old readers).
+	 *
+	 * Default setting: {@value #DEFAULT_DELTA_BASE_AS_OFFSET}
+	 *
+	 * @param deltaBaseAsOffset
+	 *            boolean indicating whether delta base can be stored as an
+	 *            offset.
+	 */
+	public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
+		this.deltaBaseAsOffset = deltaBaseAsOffset;
+	}
+
+	/**
+	 * Check whether the writer will create new deltas on the fly.
+	 *
+	 * Default setting: {@value #DEFAULT_DELTA_COMPRESS}
+	 *
+	 * @return true if the writer will create a new delta when either
+	 *         {@link #isReuseDeltas()} is false, or no suitable delta is
+	 *         available for reuse.
+	 */
+	public boolean isDeltaCompress() {
+		return deltaCompress;
+	}
+
+	/**
+	 * Set whether or not the writer will create new deltas on the fly.
+	 *
+	 * Default setting: {@value #DEFAULT_DELTA_COMPRESS}
+	 *
+	 * @param deltaCompress
+	 *            true to create deltas when {@link #isReuseDeltas()} is false,
+	 *            or when a suitable delta isn't available for reuse. Set to
+	 *            false to write whole objects instead.
+	 */
+	public void setDeltaCompress(boolean deltaCompress) {
+		this.deltaCompress = deltaCompress;
+	}
+
+	/**
+	 * Get maximum depth of delta chain set up for the writer.
+	 *
+	 * Generated chains are not longer than this value.
+	 *
+	 * Default setting: {@value #DEFAULT_MAX_DELTA_DEPTH}
+	 *
+	 * @return maximum delta chain depth.
+	 */
+	public int getMaxDeltaDepth() {
+		return maxDeltaDepth;
+	}
+
+	/**
+	 * Set up maximum depth of delta chain for the writer.
+	 *
+	 * Generated chains are not longer than this value. Too low value causes low
+	 * compression level, while too big makes unpacking (reading) longer.
+	 *
+	 * Default setting: {@value #DEFAULT_MAX_DELTA_DEPTH}
+	 *
+	 * @param maxDeltaDepth
+	 *            maximum delta chain depth.
+	 */
+	public void setMaxDeltaDepth(int maxDeltaDepth) {
+		this.maxDeltaDepth = maxDeltaDepth;
+	}
+
+	/**
+	 * Get the number of objects to try when looking for a delta base.
+	 *
+	 * This limit is per thread, if 4 threads are used the actual memory used
+	 * will be 4 times this value.
+	 *
+	 * Default setting: {@value #DEFAULT_DELTA_SEARCH_WINDOW_SIZE}
+	 *
+	 * @return the object count to be searched.
+	 */
+	public int getDeltaSearchWindowSize() {
+		return deltaSearchWindowSize;
+	}
+
+	/**
+	 * Set the number of objects considered when searching for a delta base.
+	 *
+	 * Default setting: {@value #DEFAULT_DELTA_SEARCH_WINDOW_SIZE}
+	 *
+	 * @param objectCount
+	 *            number of objects to search at once. Must be at least 2.
+	 */
+	public void setDeltaSearchWindowSize(int objectCount) {
+		if (objectCount <= 2)
+			setDeltaCompress(false);
+		else
+			deltaSearchWindowSize = objectCount;
+	}
+
+	/**
+	 * Get maximum number of bytes to put into the delta search window.
+	 *
+	 * Default setting is 0, for an unlimited amount of memory usage. Actual
+	 * memory used is the lower limit of either this setting, or the sum of
+	 * space used by at most {@link #getDeltaSearchWindowSize()} objects.
+	 *
+	 * This limit is per thread, if 4 threads are used the actual memory limit
+	 * will be 4 times this value.
+	 *
+	 * @return the memory limit.
+	 */
+	public long getDeltaSearchMemoryLimit() {
+		return deltaSearchMemoryLimit;
+	}
+
+	/**
+	 * Set the maximum number of bytes to put into the delta search window.
+	 *
+	 * Default setting is 0, for an unlimited amount of memory usage. If the
+	 * memory limit is reached before {@link #getDeltaSearchWindowSize()} the
+	 * window size is temporarily lowered.
+	 *
+	 * @param memoryLimit
+	 *            Maximum number of bytes to load at once, 0 for unlimited.
+	 */
+	public void setDeltaSearchMemoryLimit(long memoryLimit) {
+		deltaSearchMemoryLimit = memoryLimit;
+	}
+
+	/**
+	 * Get the size of the in-memory delta cache.
+	 *
+	 * This limit is for the entire writer, even if multiple threads are used.
+	 *
+	 * Default setting: {@value #DEFAULT_DELTA_CACHE_SIZE}
+	 *
+	 * @return maximum number of bytes worth of delta data to cache in memory.
+	 *         If 0 the cache is infinite in size (up to the JVM heap limit
+	 *         anyway). A very tiny size such as 1 indicates the cache is
+	 *         effectively disabled.
+	 */
+	public long getDeltaCacheSize() {
+		return deltaCacheSize;
+	}
+
+	/**
+	 * Set the maximum number of bytes of delta data to cache.
+	 *
+	 * During delta search, up to this many bytes worth of small or hard to
+	 * compute deltas will be stored in memory. This cache speeds up writing by
+	 * allowing the cached entry to simply be dumped to the output stream.
+	 *
+	 * Default setting: {@value #DEFAULT_DELTA_CACHE_SIZE}
+	 *
+	 * @param size
+	 *            number of bytes to cache. Set to 0 to enable an infinite
+	 *            cache, set to 1 (an impossible size for any delta) to disable
+	 *            the cache.
+	 */
+	public void setDeltaCacheSize(long size) {
+		deltaCacheSize = size;
+	}
+
+	/**
+	 * Maximum size in bytes of a delta to cache.
+	 *
+	 * Default setting: {@value #DEFAULT_DELTA_CACHE_LIMIT}
+	 *
+	 * @return maximum size (in bytes) of a delta that should be cached.
+	 */
+	public int getDeltaCacheLimit() {
+		return deltaCacheLimit;
+	}
+
+	/**
+	 * Set the maximum size of a delta that should be cached.
+	 *
+	 * During delta search, any delta smaller than this size will be cached, up
+	 * to the {@link #getDeltaCacheSize()} maximum limit. This speeds up writing
+	 * by allowing these cached deltas to be output as-is.
+	 *
+	 * Default setting: {@value #DEFAULT_DELTA_CACHE_LIMIT}
+	 *
+	 * @param size
+	 *            maximum size (in bytes) of a delta to be cached.
+	 */
+	public void setDeltaCacheLimit(int size) {
+		deltaCacheLimit = size;
+	}
+
+	/**
+	 * Get the maximum file size that will be delta compressed.
+	 *
+	 * Files bigger than this setting will not be delta compressed, as they are
+	 * more than likely already highly compressed binary data files that do not
+	 * delta compress well, such as MPEG videos.
+	 *
+	 * Default setting: {@value #DEFAULT_BIG_FILE_THRESHOLD}
+	 *
+	 * @return the configured big file threshold.
+	 */
+	public long getBigFileThreshold() {
+		return bigFileThreshold;
+	}
+
+	/**
+	 * Set the maximum file size that should be considered for deltas.
+	 *
+	 * Default setting: {@value #DEFAULT_BIG_FILE_THRESHOLD}
+	 *
+	 * @param bigFileThreshold
+	 *            the limit, in bytes.
+	 */
+	public void setBigFileThreshold(long bigFileThreshold) {
+		this.bigFileThreshold = bigFileThreshold;
+	}
+
+	/**
+	 * Get the compression level applied to objects in the pack.
+	 *
+	 * Default setting: {@value java.util.zip.Deflater#DEFAULT_COMPRESSION}
+	 *
+	 * @return current compression level, see {@link java.util.zip.Deflater}.
+	 */
+	public int getCompressionLevel() {
+		return compressionLevel;
+	}
+
+	/**
+	 * Set the compression level applied to objects in the pack.
+	 *
+	 * Default setting: {@value java.util.zip.Deflater#DEFAULT_COMPRESSION}
+	 *
+	 * @param level
+	 *            compression level, must be a valid level recognized by the
+	 *            {@link java.util.zip.Deflater} class.
+	 */
+	public void setCompressionLevel(int level) {
+		compressionLevel = level;
+	}
+
+	/**
+	 * Get the number of threads used during delta compression.
+	 *
+	 * Default setting: 0 (auto-detect processors)
+	 *
+	 * @return number of threads used for delta compression. 0 will auto-detect
+	 *         the threads to the number of available processors.
+	 */
+	public int getThreads() {
+		return threads;
+	}
+
+	/**
+	 * Set the number of threads to use for delta compression.
+	 *
+	 * During delta compression, if there are enough objects to be considered
+	 * the writer will start up concurrent threads and allow them to compress
+	 * different sections of the repository concurrently.
+	 *
+	 * An application thread pool can be set by {@link #setExecutor(Executor)}.
+	 * If not set a temporary pool will be created by the writer, and torn down
+	 * automatically when compression is over.
+	 *
+	 * Default setting: 0 (auto-detect processors)
+	 *
+	 * @param threads
+	 *            number of threads to use. If <= 0 the number of available
+	 *            processors for this JVM is used.
+	 */
+	public void setThreads(int threads) {
+		this.threads = threads;
+	}
+
+	/** @return the preferred thread pool to execute delta search on. */
+	public Executor getExecutor() {
+		return executor;
+	}
+
+	/**
+	 * Set the executor to use when using threads.
+	 *
+	 * During delta compression if the executor is non-null jobs will be queued
+	 * up on it to perform delta compression in parallel. Aside from setting the
+	 * executor, the caller must set {@link #setThreads(int)} to enable threaded
+	 * delta search.
+	 *
+	 * @param executor
+	 *            executor to use for threads. Set to null to create a temporary
+	 *            executor just for the writer.
+	 */
+	public void setExecutor(Executor executor) {
+		this.executor = executor;
+	}
+
+	/**
+	 * Get the pack index file format version this instance creates.
+	 *
+	 * Default setting: {@value #DEFAULT_INDEX_VERSION}
+	 *
+	 * @return the index version, the special version 0 designates the oldest
+	 *         (most compatible) format available for the objects.
+	 * @see PackIndexWriter
+	 */
+	public int getIndexVersion() {
+		return indexVersion;
+	}
+
+	/**
+	 * Set the pack index file format version this instance will create.
+	 *
+	 * Default setting: {@value #DEFAULT_INDEX_VERSION}
+	 *
+	 * @param version
+	 *            the version to write. The special version 0 designates the
+	 *            oldest (most compatible) format available for the objects.
+	 * @see PackIndexWriter
+	 */
+	public void setIndexVersion(final int version) {
+		indexVersion = version;
+	}
+
+	/**
+	 * Update properties by setting fields from the configuration.
+	 *
+	 * If a property's corresponding variable is not defined in the supplied
+	 * configuration, then it is left unmodified.
+	 *
+	 * @param rc
+	 *            configuration to read properties from.
+	 */
+	public void fromConfig(final Config rc) {
+		setMaxDeltaDepth(rc.getInt("pack", "depth", getMaxDeltaDepth()));
+		setDeltaSearchWindowSize(rc.getInt("pack", "window", getDeltaSearchWindowSize()));
+		setDeltaSearchMemoryLimit(rc.getLong("pack", "windowmemory", getDeltaSearchMemoryLimit()));
+		setDeltaCacheSize(rc.getLong("pack", "deltacachesize", getDeltaCacheSize()));
+		setDeltaCacheLimit(rc.getInt("pack", "deltacachelimit", getDeltaCacheLimit()));
+		setCompressionLevel(rc.getInt("pack", "compression",
+				rc.getInt("core", "compression", getCompressionLevel())));
+		setIndexVersion(rc.getInt("pack", "indexversion", getIndexVersion()));
+		setBigFileThreshold(rc.getLong("core", "bigfilethreshold", getBigFileThreshold()));
+		setThreads(rc.getInt("pack", "threads", getThreads()));
+
+		// These variables aren't standardized
+		//
+		setReuseDeltas(rc.getBoolean("pack", "reusedeltas", isReuseDeltas()));
+		setReuseObjects(rc.getBoolean("pack", "reuseobjects", isReuseObjects()));
+		setDeltaCompress(rc.getBoolean("pack", "deltacompression", isDeltaCompress()));
+	}
+}

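The net effect of this file: every tuning knob lives on one shareable object, seeded from git config by the PackConfig(Repository) constructor and overridable afterwards. A sketch of the intended call pattern (the repository path is hypothetical, and FileRepository is assumed as the on-disk Repository implementation of this JGit era):

    import org.eclipse.jgit.lib.Repository;
    import org.eclipse.jgit.storage.file.FileRepository;
    import org.eclipse.jgit.storage.pack.PackConfig;
    import org.eclipse.jgit.storage.pack.PackWriter;

    public class PackConfigExample {
        public static void main(String[] args) throws Exception {
            Repository repo = new FileRepository("/tmp/example/.git");

            PackConfig cfg = new PackConfig(repo); // seeded from pack.* and core.*
            cfg.setDeltaSearchWindowSize(20); // spend more CPU for a smaller pack
            cfg.setThreads(0); // 0 = auto-detect processor count

            // One unmodified config can serve many concurrent writers.
            PackWriter pw = new PackWriter(cfg, repo.newObjectReader());
            try {
                // preparePack(...) and writePack(...) as in PackWriterTest above.
            } finally {
                pw.release();
            }
        }
    }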
556
org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java

@ -58,8 +58,12 @@ import java.util.Collections;
import java.util.Comparator; import java.util.Comparator;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.zip.Deflater; import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream; import java.util.zip.DeflaterOutputStream;
@ -71,7 +75,6 @@ import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.MissingObjectException; import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException; import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
import org.eclipse.jgit.lib.AnyObjectId; import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.lib.Constants; import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.NullProgressMonitor; import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.ObjectId; import org.eclipse.jgit.lib.ObjectId;
@ -123,47 +126,6 @@ import org.eclipse.jgit.util.TemporaryBuffer;
* </p> * </p>
*/ */
public class PackWriter { public class PackWriter {
/**
* Default value of deltas reuse option.
*
* @see #setReuseDeltas(boolean)
*/
public static final boolean DEFAULT_REUSE_DELTAS = true;
/**
* Default value of objects reuse option.
*
* @see #setReuseObjects(boolean)
*/
public static final boolean DEFAULT_REUSE_OBJECTS = true;
/**
* Default value of delta base as offset option.
*
* @see #setDeltaBaseAsOffset(boolean)
*/
public static final boolean DEFAULT_DELTA_BASE_AS_OFFSET = false;
/**
* Default value of maximum delta chain depth.
*
* @see #setMaxDeltaDepth(int)
*/
public static final int DEFAULT_MAX_DELTA_DEPTH = 50;
/**
* Default window size during packing.
*
* @see #setDeltaSearchWindowSize(int)
*/
public static final int DEFAULT_DELTA_SEARCH_WINDOW_SIZE = 10;
static final long DEFAULT_BIG_FILE_THRESHOLD = 50 * 1024 * 1024;
static final long DEFAULT_DELTA_CACHE_SIZE = 50 * 1024 * 1024;
static final int DEFAULT_DELTA_CACHE_LIMIT = 100;
private static final int PACK_VERSION_GENERATED = 2; private static final int PACK_VERSION_GENERATED = 2;
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
@ -181,8 +143,6 @@ public class PackWriter {
// edge objects for thin packs // edge objects for thin packs
private final ObjectIdSubclassMap<ObjectToPack> edgeObjects = new ObjectIdSubclassMap<ObjectToPack>(); private final ObjectIdSubclassMap<ObjectToPack> edgeObjects = new ObjectIdSubclassMap<ObjectToPack>();
private int compressionLevel;
private Deflater myDeflater; private Deflater myDeflater;
private final ObjectReader reader; private final ObjectReader reader;
@ -190,33 +150,15 @@ public class PackWriter {
/** {@link #reader} recast to the reuse interface, if it supports it. */ /** {@link #reader} recast to the reuse interface, if it supports it. */
private final ObjectReuseAsIs reuseSupport; private final ObjectReuseAsIs reuseSupport;
private final PackConfig config;
private List<ObjectToPack> sortedByName; private List<ObjectToPack> sortedByName;
private byte packcsum[]; private byte packcsum[];
private boolean reuseDeltas = DEFAULT_REUSE_DELTAS; private boolean deltaBaseAsOffset;
private boolean reuseObjects = DEFAULT_REUSE_OBJECTS;
private boolean deltaBaseAsOffset = DEFAULT_DELTA_BASE_AS_OFFSET;
private boolean deltaCompress = true;
private int maxDeltaDepth = DEFAULT_MAX_DELTA_DEPTH;
private int deltaSearchWindowSize = DEFAULT_DELTA_SEARCH_WINDOW_SIZE;
private long deltaSearchMemoryLimit; private boolean reuseDeltas;
private long deltaCacheSize = DEFAULT_DELTA_CACHE_SIZE;
private int deltaCacheLimit = DEFAULT_DELTA_CACHE_LIMIT;
private int indexVersion;
private long bigFileThreshold = DEFAULT_BIG_FILE_THRESHOLD;
private int threads = 1;
private boolean thin; private boolean thin;
@ -245,7 +187,7 @@ public class PackWriter {
* reader to read from the repository with. * reader to read from the repository with.
*/ */
public PackWriter(final ObjectReader reader) { public PackWriter(final ObjectReader reader) {
this(null, reader); this(new PackConfig(), reader);
} }
/** /**
@ -260,105 +202,38 @@ public class PackWriter {
* reader to read from the repository with. * reader to read from the repository with.
*/ */
public PackWriter(final Repository repo, final ObjectReader reader) { public PackWriter(final Repository repo, final ObjectReader reader) {
this.reader = reader; this(new PackConfig(repo), reader);
if (reader instanceof ObjectReuseAsIs)
reuseSupport = ((ObjectReuseAsIs) reader);
else
reuseSupport = null;
final PackConfig pc = configOf(repo).get(PackConfig.KEY);
deltaSearchWindowSize = pc.deltaWindow;
deltaSearchMemoryLimit = pc.deltaWindowMemory;
deltaCacheSize = pc.deltaCacheSize;
deltaCacheLimit = pc.deltaCacheLimit;
maxDeltaDepth = pc.deltaDepth;
compressionLevel = pc.compression;
indexVersion = pc.indexVersion;
bigFileThreshold = pc.bigFileThreshold;
threads = pc.threads;
}
private static Config configOf(final Repository repo) {
if (repo == null)
return new Config();
return repo.getConfig();
}
/**
* Check whether object is configured to reuse deltas existing in
* repository.
* <p>
* Default setting: {@link #DEFAULT_REUSE_DELTAS}
* </p>
*
* @return true if object is configured to reuse deltas; false otherwise.
*/
public boolean isReuseDeltas() {
return reuseDeltas;
} }
/** /**
* Set reuse deltas configuration option for this writer. When enabled, * Create writer with a specified configuration.
* writer will search for delta representation of object in repository and
* use it if possible. Normally, only deltas with base to another object
* existing in set of objects to pack will be used. Exception is however
* thin-pack (see
* {@link #preparePack(ProgressMonitor, Collection, Collection)} and
* {@link #preparePack(Iterator)}) where base object must exist on other
* side machine.
* <p>
* When raw delta data is directly copied from a pack file, checksum is
* computed to verify data.
* </p>
* <p> * <p>
* Default setting: {@link #DEFAULT_REUSE_DELTAS} * Objects for packing are specified in {@link #preparePack(Iterator)} or
* </p> * {@link #preparePack(ProgressMonitor, Collection, Collection)}.
*
* @param reuseDeltas
* boolean indicating whether or not try to reuse deltas.
*/
public void setReuseDeltas(boolean reuseDeltas) {
this.reuseDeltas = reuseDeltas;
}
/**
* Checks whether object is configured to reuse existing objects
* representation in repository.
* <p>
* Default setting: {@link #DEFAULT_REUSE_OBJECTS}
* </p>
* *
* @return true if writer is configured to reuse objects representation from * @param config
* pack; false otherwise. * configuration for the pack writer.
* @param reader
* reader to read from the repository with.
*/ */
public boolean isReuseObjects() { public PackWriter(final PackConfig config, final ObjectReader reader) {
return reuseObjects; this.config = config;
} this.reader = reader;
if (reader instanceof ObjectReuseAsIs)
reuseSupport = ((ObjectReuseAsIs) reader);
else
reuseSupport = null;
/** deltaBaseAsOffset = config.isDeltaBaseAsOffset();
* Set reuse objects configuration option for this writer. If enabled, reuseDeltas = config.isReuseDeltas();
* writer searches for representation in a pack file. If possible,
* compressed data is directly copied from such a pack file. Data checksum
* is verified.
* <p>
* Default setting: {@link #DEFAULT_REUSE_OBJECTS}
* </p>
*
* @param reuseObjects
* boolean indicating whether or not writer should reuse existing
* objects representation.
*/
public void setReuseObjects(boolean reuseObjects) {
this.reuseObjects = reuseObjects;
} }
/** /**
* Check whether writer can store delta base as an offset (new style * Check whether writer can store delta base as an offset (new style
* reducing pack size) or should store it as an object id (legacy style, * reducing pack size) or should store it as an object id (legacy style,
* compatible with old readers). * compatible with old readers).
* <p> *
* Default setting: {@link #DEFAULT_DELTA_BASE_AS_OFFSET} * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
* </p>
* *
* @return true if delta base is stored as an offset; false if it is stored * @return true if delta base is stored as an offset; false if it is stored
* as an object id. * as an object id.
@ -371,9 +246,8 @@ public class PackWriter {
* Set writer delta base format. Delta base can be written as an offset in a * Set writer delta base format. Delta base can be written as an offset in a
* pack file (new approach reducing file size) or as an object id (legacy * pack file (new approach reducing file size) or as an object id (legacy
* approach, compatible with old readers). * approach, compatible with old readers).
* <p> *
* Default setting: {@link #DEFAULT_DELTA_BASE_AS_OFFSET} * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
* </p>
* *
* @param deltaBaseAsOffset * @param deltaBaseAsOffset
* boolean indicating whether delta base can be stored as an * boolean indicating whether delta base can be stored as an
@ -383,235 +257,6 @@ public class PackWriter {
this.deltaBaseAsOffset = deltaBaseAsOffset; this.deltaBaseAsOffset = deltaBaseAsOffset;
} }
/**
* Check whether the writer will create new deltas on the fly.
* <p>
* Default setting: true
* </p>
*
* @return true if the writer will create a new delta when either
* {@link #isReuseDeltas()} is false, or no suitable delta is
* available for reuse.
*/
public boolean isDeltaCompress() {
return deltaCompress;
}
/**
* Set whether or not the writer will create new deltas on the fly.
*
* @param deltaCompress
* true to create deltas when {@link #isReuseDeltas()} is false,
* or when a suitable delta isn't available for reuse. Set to
* false to write whole objects instead.
*/
public void setDeltaCompress(boolean deltaCompress) {
this.deltaCompress = deltaCompress;
}
/**
* Get maximum depth of delta chain set up for this writer. Generated chains
* are not longer than this value.
* <p>
* Default setting: {@link #DEFAULT_MAX_DELTA_DEPTH}
* </p>
*
* @return maximum delta chain depth.
*/
public int getMaxDeltaDepth() {
return maxDeltaDepth;
}
/**
* Set up maximum depth of delta chain for this writer. Generated chains are
* not longer than this value. Too low value causes low compression level,
* while too big makes unpacking (reading) longer.
* <p>
* Default setting: {@link #DEFAULT_MAX_DELTA_DEPTH}
* </p>
*
* @param maxDeltaDepth
* maximum delta chain depth.
*/
public void setMaxDeltaDepth(int maxDeltaDepth) {
this.maxDeltaDepth = maxDeltaDepth;
}
/**
* Get the number of objects to try when looking for a delta base.
* <p>
* This limit is per thread, if 4 threads are used the actual memory
* used will be 4 times this value.
*
* @return the object count to be searched.
*/
public int getDeltaSearchWindowSize() {
return deltaSearchWindowSize;
}
/**
* Set the number of objects considered when searching for a delta base.
* <p>
* Default setting: {@link #DEFAULT_DELTA_SEARCH_WINDOW_SIZE}
* </p>
*
* @param objectCount
* number of objects to search at once. Must be at least 2.
*/
public void setDeltaSearchWindowSize(int objectCount) {
if (objectCount <= 2)
setDeltaCompress(false);
else
deltaSearchWindowSize = objectCount;
}
/**
* Get maximum number of bytes to put into the delta search window.
* <p>
* Default setting is 0, for an unlimited amount of memory usage. Actual
* memory used is the lower limit of either this setting, or the sum of
* space used by at most {@link #getDeltaSearchWindowSize()} objects.
* <p>
* This limit is per thread, if 4 threads are used the actual memory
* limit will be 4 times this value.
*
* @return the memory limit.
*/
public long getDeltaSearchMemoryLimit() {
return deltaSearchMemoryLimit;
}
/**
* Set the maximum number of bytes to put into the delta search window.
* <p>
* Default setting is 0, for an unlimited amount of memory usage. If the
* memory limit is reached before {@link #getDeltaSearchWindowSize()} the
* window size is temporarily lowered.
*
* @param memoryLimit
* Maximum number of bytes to load at once, 0 for unlimited.
*/
public void setDeltaSearchMemoryLimit(long memoryLimit) {
deltaSearchMemoryLimit = memoryLimit;
}
/**
* Get the size of the in-memory delta cache.
* <p>
* This limit is for the entire writer, even if multiple threads are used.
*
* @return maximum number of bytes worth of delta data to cache in memory.
* If 0 the cache is infinite in size (up to the JVM heap limit
* anyway). A very tiny size such as 1 indicates the cache is
* effectively disabled.
*/
public long getDeltaCacheSize() {
return deltaCacheSize;
}
/**
* Set the maximum number of bytes of delta data to cache.
* <p>
* During delta search, up to this many bytes worth of small or hard to
* compute deltas will be stored in memory. This cache speeds up writing by
* allowing the cached entry to simply be dumped to the output stream.
*
* @param size
* number of bytes to cache. Set to 0 to enable an infinite
* cache, set to 1 (an impossible size for any delta) to disable
* the cache.
*/
public void setDeltaCacheSize(long size) {
deltaCacheSize = size;
}
/**
* Maximum size in bytes of a delta to cache.
*
* @return maximum size (in bytes) of a delta that should be cached.
*/
public int getDeltaCacheLimit() {
return deltaCacheLimit;
}
/**
* Set the maximum size of a delta that should be cached.
* <p>
* During delta search, any delta smaller than this size will be cached, up
* to the {@link #getDeltaCacheSize()} maximum limit. This speeds up writing
* by allowing these cached deltas to be output as-is.
*
* @param size
* maximum size (in bytes) of a delta to be cached.
*/
public void setDeltaCacheLimit(int size) {
deltaCacheLimit = size;
}
/**
* Get the maximum file size that will be delta compressed.
* <p>
* Files bigger than this setting will not be delta compressed, as they are
* more than likely already highly compressed binary data files that do not
* delta compress well, such as MPEG videos.
*
* @return the configured big file threshold.
*/
public long getBigFileThreshold() {
return bigFileThreshold;
}
/**
* Set the maximum file size that should be considered for deltas.
*
* @param bigFileThreshold
* the limit, in bytes.
*/
public void setBigFileThreshold(long bigFileThreshold) {
this.bigFileThreshold = bigFileThreshold;
}
/**
* Get the compression level applied to objects in the pack.
*
* @return current compression level, see {@link java.util.zip.Deflater}.
*/
public int getCompressionLevel() {
return compressionLevel;
}
/**
* Set the compression level applied to objects in the pack.
*
* @param level
* compression level, must be a valid level recognized by the
* {@link java.util.zip.Deflater} class. Typically this setting
* is {@link java.util.zip.Deflater#BEST_SPEED}.
*/
public void setCompressionLevel(int level) {
compressionLevel = level;
}
/** @return number of threads used for delta compression. */
public int getThreads() {
return threads;
}
/**
* Set the number of threads to use for delta compression.
* <p>
* During delta compression, if there are enough objects to be considered
* the writer will start up concurrent threads and allow them to compress
* different sections of the repository concurrently.
*
* @param threads
* number of threads to use. If <= 0 the number of available
* processors for this JVM is used.
*/
public void setThread(int threads) {
this.threads = threads;
}
/** @return true if this writer is producing a thin pack. */
public boolean isThin() {
return thin;
@@ -651,18 +296,6 @@ public class PackWriter {
ignoreMissingUninteresting = ignore;
}
-/**
- * Set the pack index file format version this instance will create.
- *
- * @param version
- *            the version to write. The special version 0 designates the
- *            oldest (most compatible) format available for the objects.
- * @see PackIndexWriter
- */
-public void setIndexVersion(final int version) {
-	indexVersion = version;
-}
/**
* Returns objects number in a pack file that was created by this writer.
*
@@ -791,6 +424,7 @@ public class PackWriter {
public void writeIndex(final OutputStream indexStream) throws IOException {
final List<ObjectToPack> list = sortByName();
final PackIndexWriter iw;
+int indexVersion = config.getIndexVersion();
if (indexVersion <= 0)
iw = PackIndexWriter.createOldestPossible(indexStream, list);
else
@@ -842,9 +476,9 @@ public class PackWriter {
if (writeMonitor == null)
writeMonitor = NullProgressMonitor.INSTANCE;
-if ((reuseDeltas || reuseObjects) && reuseSupport != null)
+if ((reuseDeltas || config.isReuseObjects()) && reuseSupport != null)
searchForReuse();
-if (deltaCompress)
+if (config.isDeltaCompress())
searchForDeltas(compressMonitor);
final PackOutputStream out = new PackOutputStream(writeMonitor,
@@ -954,7 +588,7 @@ public class PackWriter {
// If its too big for us to handle, skip over it.
//
-if (bigFileThreshold <= sz || Integer.MAX_VALUE <= sz)
+if (config.getBigFileThreshold() <= sz || Integer.MAX_VALUE <= sz)
return false;
// If its too tiny for the delta compression to work, skip it.
@@ -970,21 +604,19 @@ public class PackWriter {
final ObjectToPack[] list, final int cnt)
throws MissingObjectException, IncorrectObjectTypeException,
LargeObjectException, IOException {
+int threads = config.getThreads();
if (threads == 0)
threads = Runtime.getRuntime().availableProcessors();
-if (threads <= 1 || cnt <= 2 * getDeltaSearchWindowSize()) {
-	DeltaCache dc = new DeltaCache(this);
-	DeltaWindow dw = new DeltaWindow(this, dc, reader);
+if (threads <= 1 || cnt <= 2 * config.getDeltaSearchWindowSize()) {
+	DeltaCache dc = new DeltaCache(config);
+	DeltaWindow dw = new DeltaWindow(config, dc, reader);
dw.search(monitor, list, 0, cnt);
return;
}
-final List<Throwable> errors = Collections
-		.synchronizedList(new ArrayList<Throwable>());
-final DeltaCache dc = new ThreadSafeDeltaCache(this);
+final DeltaCache dc = new ThreadSafeDeltaCache(config);
final ProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);
-final ExecutorService pool = Executors.newFixedThreadPool(threads);
// Guess at the size of batch we want. Because we don't really
// have a way for a thread to steal work from another thread if
@@ -992,9 +624,10 @@ public class PackWriter {
// are a bit smaller.
//
int estSize = cnt / (threads * 2);
-if (estSize < 2 * getDeltaSearchWindowSize())
-	estSize = 2 * getDeltaSearchWindowSize();
+if (estSize < 2 * config.getDeltaSearchWindowSize())
+	estSize = 2 * config.getDeltaSearchWindowSize();
+final List<DeltaTask> myTasks = new ArrayList<DeltaTask>(threads * 2);
for (int i = 0; i < cnt;) {
final int start = i;
final int batchSize;
@@ -1019,27 +652,25 @@ public class PackWriter {
batchSize = end - start;
}
i += batchSize;
+myTasks.add(new DeltaTask(config, reader, dc, pm, batchSize, start, list));
-pool.submit(new Runnable() {
-	public void run() {
-		try {
-			final ObjectReader or = reader.newReader();
-			try {
-				DeltaWindow dw;
-				dw = new DeltaWindow(PackWriter.this, dc, or);
-				dw.search(pm, list, start, batchSize);
-			} finally {
-				or.release();
-			}
-		} catch (Throwable err) {
-			errors.add(err);
-		}
-	}
-});
}
-// Tell the pool to stop.
+final Executor executor = config.getExecutor();
+final List<Throwable> errors = Collections
+		.synchronizedList(new ArrayList<Throwable>());
+if (executor instanceof ExecutorService) {
+	// Caller supplied us a service, use it directly.
	//
+	runTasks((ExecutorService) executor, myTasks, errors);
+} else if (executor == null) {
+	// Caller didn't give us a way to run the tasks, spawn up a
+	// temporary thread pool and make sure it tears down cleanly.
+	//
+	ExecutorService pool = Executors.newFixedThreadPool(threads);
+	try {
+		runTasks(pool, myTasks, errors);
+	} finally {
pool.shutdown();
for (;;) {
try {
@@ -1050,8 +681,38 @@ public class PackWriter {
JGitText.get().packingCancelledDuringObjectsWriting);
}
}
+	}
+} else {
+	// The caller gave us an executor, but it might not do
+	// asynchronous execution. Wrap everything and hope it
+	// can schedule these for us.
+	//
+	final CountDownLatch done = new CountDownLatch(myTasks.size());
+	for (final DeltaTask task : myTasks) {
+		executor.execute(new Runnable() {
+			public void run() {
+				try {
+					task.call();
+				} catch (Throwable failure) {
+					errors.add(failure);
+				} finally {
+					done.countDown();
+				}
+			}
+		});
+	}
+	try {
+		done.await();
+	} catch (InterruptedException ie) {
+		// We can't abort the other tasks as we have no handle.
+		// Cross our fingers and just break out anyway.
+		//
+		throw new IOException(
+				JGitText.get().packingCancelledDuringObjectsWriting);
+	}
+}
-// If any thread threw an error, try to report it back as
+// If any task threw an error, try to report it back as
// though we weren't using a threaded search algorithm.
//
if (!errors.isEmpty()) {
@@ -1069,6 +730,28 @@ public class PackWriter {
}
}
+private void runTasks(ExecutorService pool, List<DeltaTask> tasks,
+		List<Throwable> errors) throws IOException {
+	List<Future<?>> futures = new ArrayList<Future<?>>(tasks.size());
+	for (DeltaTask task : tasks)
+		futures.add(pool.submit(task));
+	try {
+		for (Future<?> f : futures) {
+			try {
+				f.get();
+			} catch (ExecutionException failed) {
+				errors.add(failed.getCause());
+			}
+		}
+	} catch (InterruptedException ie) {
+		for (Future<?> f : futures)
+			f.cancel(true);
+		throw new IOException(
+				JGitText.get().packingCancelledDuringObjectsWriting);
+	}
+}
private void writeObjects(ProgressMonitor writeMonitor, PackOutputStream out)
throws IOException {
for (List<ObjectToPack> list : objectsLists) {
@@ -1196,8 +879,8 @@ public class PackWriter {
private TemporaryBuffer.Heap delta(final ObjectToPack otp)
throws IOException {
-DeltaIndex index = new DeltaIndex(buffer(reader, otp.getDeltaBaseId()));
-byte[] res = buffer(reader, otp);
+DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
+byte[] res = buffer(otp);
// We never would have proposed this pair if the delta would be
// larger than the unpacked version of the object. So using it
@@ -1208,7 +891,12 @@ public class PackWriter {
return delta;
}
-byte[] buffer(ObjectReader or, AnyObjectId objId) throws IOException {
+private byte[] buffer(AnyObjectId objId) throws IOException {
+	return buffer(config, reader, objId);
+}
+static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
+		throws IOException {
ObjectLoader ldr = or.open(objId);
if (!ldr.isLarge())
return ldr.getCachedBytes();
@@ -1221,7 +909,7 @@ public class PackWriter {
// If it really is too big to work with, abort out now.
//
long sz = ldr.getSize();
-if (getBigFileThreshold() <= sz || Integer.MAX_VALUE < sz)
+if (config.getBigFileThreshold() <= sz || Integer.MAX_VALUE < sz)
throw new LargeObjectException(objId.copy());
// Its considered to be large by the loader, but we really
@@ -1248,7 +936,7 @@ public class PackWriter {
private Deflater deflater() {
if (myDeflater == null)
-	myDeflater = new Deflater(compressionLevel);
+	myDeflater = new Deflater(config.getCompressionLevel());
return myDeflater;
}
@@ -1404,7 +1092,7 @@ public class PackWriter {
otp.clearDeltaBase();
otp.clearReuseAsIs();
}
-} else if (nFmt == PACK_WHOLE && reuseObjects) {
+} else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
otp.clearDeltaBase();
otp.setReuseAsIs();
otp.setWeight(nWeight);

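Taken together, the PackWriter changes above move all tuning onto PackConfig and let callers supply their own Executor for the delta search. A minimal sketch of the new calling convention follows; it assumes PackConfig exposes setters mirroring the getters used above (setThreads, and a setExecutor counterpart to getExecutor()), which this change introduces but does not show in full:

import java.io.OutputStream;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.storage.pack.PackWriter;

class PackConfigExample {
	// Pack the `want` objects, excluding what `have` already covers,
	// running the delta search on a caller-managed thread pool.
	static void pack(Repository repo, Set<ObjectId> want,
			Set<ObjectId> have, OutputStream out) throws Exception {
		ExecutorService pool = Executors.newFixedThreadPool(4);
		try {
			PackConfig pc = new PackConfig(repo); // seeded from repo settings
			pc.setThreads(4); // delta search concurrency (assumed setter)
			pc.setExecutor(pool); // assumed setter paired with getExecutor()
			PackWriter pw = new PackWriter(pc, repo.newObjectReader());
			try {
				pw.preparePack(NullProgressMonitor.INSTANCE, want, have);
				pw.writePack(NullProgressMonitor.INSTANCE,
						NullProgressMonitor.INSTANCE, out);
			} finally {
				pw.release();
			}
		} finally {
			pool.shutdown();
		}
	}
}

Handing PackWriter an ExecutorService keeps thread ownership with the caller; as the diff shows, the writer only spawns its own temporary pool when no executor was configured.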
4
org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/ThreadSafeDeltaCache.java

@@ -48,8 +48,8 @@ import java.util.concurrent.locks.ReentrantLock;
class ThreadSafeDeltaCache extends DeltaCache {
private final ReentrantLock lock;
-ThreadSafeDeltaCache(PackWriter pw) {
-	super(pw);
+ThreadSafeDeltaCache(PackConfig pc) {
+	super(pc);
lock = new ReentrantLock();
}

3
org.eclipse.jgit/src/org/eclipse/jgit/transport/BasePackPushConnection.java

@@ -231,7 +231,8 @@ class BasePackPushConnection extends BasePackConnection implements
List<ObjectId> newObjects = new ArrayList<ObjectId>(refUpdates.size());
final long start;
-final PackWriter writer = new PackWriter(local);
+final PackWriter writer = new PackWriter(transport.getPackConfig(),
+		local.newObjectReader());
try {
for (final Ref r : getRefs())

22
org.eclipse.jgit/src/org/eclipse/jgit/transport/BundleWriter.java

@@ -61,6 +61,7 @@ import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
+import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.storage.pack.PackWriter;
/**
@@ -81,12 +82,14 @@ import org.eclipse.jgit.storage.pack.PackWriter;
* overall bundle size.
*/
public class BundleWriter {
-private final PackWriter packWriter;
+private final Repository db;
private final Map<String, ObjectId> include;
private final Set<RevCommit> assume;
+private PackConfig packConfig;
/**
* Create a writer for a bundle.
*
@@ -94,11 +97,22 @@ public class BundleWriter {
* repository where objects are stored.
*/
public BundleWriter(final Repository repo) {
-packWriter = new PackWriter(repo);
+db = repo;
include = new TreeMap<String, ObjectId>();
assume = new HashSet<RevCommit>();
}
+/**
+ * Set the configuration used by the pack generator.
+ *
+ * @param pc
+ *            configuration controlling packing parameters. If null the
+ *            source repository's settings will be used.
+ */
+public void setPackConfig(PackConfig pc) {
+	this.packConfig = pc;
+}
/**
* Include an object (and everything reachable from it) in the bundle.
*
@@ -166,6 +180,10 @@ public class BundleWriter {
*/
public void writeBundle(ProgressMonitor monitor, OutputStream os)
throws IOException {
+PackConfig pc = packConfig;
+if (pc == null)
+	pc = new PackConfig(db);
+PackWriter packWriter = new PackWriter(pc, db.newObjectReader());
try {
final HashSet<ObjectId> inc = new HashSet<ObjectId>();
final HashSet<ObjectId> exc = new HashSet<ObjectId>();

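For library users, the new BundleWriter hook can be driven as in this sketch; the include(...) call shape is assumed from BundleWriter's existing public API and is not part of this diff:

import java.io.FileOutputStream;
import java.io.OutputStream;
import java.util.zip.Deflater;

import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.BundleWriter;

class BundleExample {
	static void writeBundle(Repository repo, String ref) throws Exception {
		BundleWriter bw = new BundleWriter(repo);
		PackConfig pc = new PackConfig(repo);
		pc.setCompressionLevel(Deflater.BEST_COMPRESSION); // smaller bundle
		bw.setPackConfig(pc); // leaving this unset keeps the repo defaults
		bw.include(ref, repo.resolve(ref)); // include(...) assumed from existing API
		OutputStream out = new FileOutputStream("repo.bundle");
		try {
			bw.writeBundle(NullProgressMonitor.INSTANCE, out);
		} finally {
			out.close();
		}
	}
}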
15
org.eclipse.jgit/src/org/eclipse/jgit/transport/Daemon.java

@@ -63,6 +63,7 @@ import org.eclipse.jgit.lib.PersonIdent;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.lib.RepositoryCache;
import org.eclipse.jgit.lib.RepositoryCache.FileKey;
+import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.util.FS;
/** Basic daemon for the anonymous <code>git://</code> transport protocol. */
@@ -90,6 +91,8 @@ public class Daemon {
private int timeout;
+private PackConfig packConfig;
/** Configure a daemon to listen on any available network port. */
public Daemon() {
this(null);
@@ -120,6 +123,7 @@ public class Daemon {
final UploadPack rp = new UploadPack(db);
final InputStream in = dc.getInputStream();
rp.setTimeout(Daemon.this.getTimeout());
+rp.setPackConfig(Daemon.this.packConfig);
rp.upload(in, dc.getOutputStream(), null);
}
}, new DaemonService("receive-pack", "receivepack") {
@@ -242,6 +246,17 @@ public class Daemon {
timeout = seconds;
}
+/**
+ * Set the configuration used by the pack generator.
+ *
+ * @param pc
+ *            configuration controlling packing parameters. If null the
+ *            source repository's settings will be used.
+ */
+public void setPackConfig(PackConfig pc) {
+	this.packConfig = pc;
+}
/**
* Start this daemon on a background thread.
*

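A sketch of wiring the daemon to a shared PackConfig, which it forwards to each UploadPack as shown above; the InetSocketAddress constructor and the no-argument PackConfig() are assumed from the surrounding API, and repository export setup is omitted:

import java.net.InetSocketAddress;

import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.Daemon;

class DaemonExample {
	public static void main(String[] args) throws Exception {
		Daemon srv = new Daemon(new InetSocketAddress(9418)); // assumed ctor
		PackConfig pc = new PackConfig(); // assumed default constructor
		pc.setThreads(2); // cap delta-search threads per upload-pack
		srv.setPackConfig(pc); // applied to every UploadPack it spawns
		srv.start();
	}
}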
30
org.eclipse.jgit/src/org/eclipse/jgit/transport/Transport.java

@@ -66,6 +66,7 @@ import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.lib.TransferConfig;
+import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.util.FS;
/**
@@ -554,6 +555,9 @@ public abstract class Transport {
/** Timeout in seconds to wait before aborting an IO read or write. */
private int timeout;
+/** Pack configuration used by this transport to make pack file. */
+private PackConfig packConfig;
/**
* Create a new transport instance.
*
@@ -791,6 +795,32 @@ public abstract class Transport {
timeout = seconds;
}
+/**
+ * Get the configuration used by the pack generator to make packs.
+ *
+ * If {@link #setPackConfig(PackConfig)} was previously given null a new
+ * PackConfig is created on demand by this method using the source
+ * repository's settings.
+ *
+ * @return the pack configuration. Never null.
+ */
+public PackConfig getPackConfig() {
+	if (packConfig == null)
+		packConfig = new PackConfig(local);
+	return packConfig;
+}
+/**
+ * Set the configuration used by the pack generator.
+ *
+ * @param pc
+ *            configuration controlling packing parameters. If null the
+ *            source repository's settings will be used.
+ */
+public void setPackConfig(PackConfig pc) {
+	packConfig = pc;
+}
/**
* Fetch objects and refs from the remote repository to the local one.
* <p>

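Callers can now override pack settings per Transport before a fetch or push. A sketch, assuming the pre-existing Transport.open and findRemoteRefUpdatesFor helpers:

import java.util.Collections;

import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.RefSpec;
import org.eclipse.jgit.transport.Transport;

class TransportExample {
	static void push(Repository repo) throws Exception {
		Transport tn = Transport.open(repo, "origin");
		try {
			PackConfig pc = new PackConfig(repo);
			pc.setThreads(0); // 0 = one thread per available processor
			tn.setPackConfig(pc); // null would fall back to repo settings
			tn.push(NullProgressMonitor.INSTANCE, tn.findRemoteRefUpdatesFor(
					Collections.singleton(new RefSpec("refs/heads/master"))));
		} finally {
			tn.close();
		}
	}
}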
26
org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java

@@ -69,6 +69,7 @@ import org.eclipse.jgit.revwalk.RevFlagSet;
import org.eclipse.jgit.revwalk.RevObject;
import org.eclipse.jgit.revwalk.RevTag;
import org.eclipse.jgit.revwalk.RevWalk;
+import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.storage.pack.PackWriter;
import org.eclipse.jgit.transport.BasePackFetchConnection.MultiAck;
import org.eclipse.jgit.transport.RefAdvertiser.PacketLineOutRefAdvertiser;
@@ -102,6 +103,9 @@ public class UploadPack {
/** Revision traversal support over {@link #db}. */
private final RevWalk walk;
+/** Configuration to pass into the PackWriter. */
+private PackConfig packConfig;
/** Timeout in seconds to wait for client interaction. */
private int timeout;
@@ -258,6 +262,17 @@ public class UploadPack {
this.refFilter = refFilter != null ? refFilter : RefFilter.DEFAULT;
}
+/**
+ * Set the configuration used by the pack generator.
+ *
+ * @param pc
+ *            configuration controlling packing parameters. If null the
+ *            source repository's settings will be used.
+ */
+public void setPackConfig(PackConfig pc) {
+	this.packConfig = pc;
+}
/**
* Execute the upload task on the socket.
*
@@ -548,8 +563,6 @@ public class UploadPack {
}
private void sendPack() throws IOException {
-final boolean thin = options.contains(OPTION_THIN_PACK);
-final boolean progress = !options.contains(OPTION_NO_PROGRESS);
final boolean sideband = options.contains(OPTION_SIDE_BAND)
|| options.contains(OPTION_SIDE_BAND_64K);
@@ -563,15 +576,18 @@ public class UploadPack {
packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA,
bufsz, rawOut);
-if (progress)
+if (!options.contains(OPTION_NO_PROGRESS))
pm = new SideBandProgressMonitor(new SideBandOutputStream(
SideBandOutputStream.CH_PROGRESS, bufsz, rawOut));
}
-final PackWriter pw = new PackWriter(db, walk.getObjectReader());
+PackConfig cfg = packConfig;
+if (cfg == null)
+	cfg = new PackConfig(db);
+final PackWriter pw = new PackWriter(cfg, walk.getObjectReader());
try {
pw.setDeltaBaseAsOffset(options.contains(OPTION_OFS_DELTA));
-pw.setThin(thin);
+pw.setThin(options.contains(OPTION_THIN_PACK));
pw.preparePack(pm, wantAll, commonBase);
if (options.contains(OPTION_INCLUDE_TAG)) {
for (final Ref r : refs.values()) {

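Embedding servers get the same hook on UploadPack. A sketch of serving one request over arbitrary streams, matching the upload(in, out, messages) call the Daemon change uses above:

import java.io.InputStream;
import java.io.OutputStream;

import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.UploadPack;

class ServeExample {
	// Serve a single upload-pack request, e.g. from an accepted socket.
	static void serve(Repository repo, PackConfig pc,
			InputStream in, OutputStream out) throws Exception {
		UploadPack up = new UploadPack(repo);
		up.setPackConfig(pc); // null falls back to repo settings, as above
		up.upload(in, out, null); // third stream is for side-channel messages
	}
}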
24
org.eclipse.jgit/src/org/eclipse/jgit/transport/WalkPushConnection.java

@@ -103,6 +103,9 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
/** Database connection to the remote repository. */
private final WalkRemoteObjectDatabase dest;
+/** The configured transport we were constructed by. */
+private final Transport transport;
/**
* Packs already known to reside in the remote repository.
* <p>
@@ -123,9 +126,9 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
WalkPushConnection(final WalkTransport walkTransport,
final WalkRemoteObjectDatabase w) {
-Transport t = (Transport)walkTransport;
-local = t.local;
-uri = t.getURI();
+transport = (Transport) walkTransport;
+local = transport.local;
+uri = transport.getURI();
dest = w;
}
@@ -209,7 +212,8 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
String pathPack = null;
String pathIdx = null;
-final PackWriter pw = new PackWriter(local);
+final PackWriter writer = new PackWriter(transport.getPackConfig(),
+		local.newObjectReader());
try {
final List<ObjectId> need = new ArrayList<ObjectId>();
final List<ObjectId> have = new ArrayList<ObjectId>();
@@ -220,20 +224,20 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
if (r.getPeeledObjectId() != null)
have.add(r.getPeeledObjectId());
}
-pw.preparePack(monitor, need, have);
+writer.preparePack(monitor, need, have);
// We don't have to continue further if the pack will
// be an empty pack, as the remote has all objects it
// needs to complete this change.
//
-if (pw.getObjectsNumber() == 0)
+if (writer.getObjectsNumber() == 0)
return;
packNames = new LinkedHashMap<String, String>();
for (final String n : dest.getPackNames())
packNames.put(n, n);
-final String base = "pack-" + pw.computeName().name();
+final String base = "pack-" + writer.computeName().name();
final String packName = base + ".pack";
pathPack = "pack/" + packName;
pathIdx = "pack/" + base + ".idx";
@@ -254,7 +258,7 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
OutputStream os = dest.writeFile(pathPack, monitor, wt + "..pack");
try {
os = new BufferedOutputStream(os);
-pw.writePack(monitor, monitor, os);
+writer.writePack(monitor, monitor, os);
} finally {
os.close();
}
@@ -262,7 +266,7 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
os = dest.writeFile(pathIdx, monitor, wt + "..idx");
try {
os = new BufferedOutputStream(os);
-pw.writeIndex(os);
+writer.writeIndex(os);
} finally {
os.close();
}
@@ -282,7 +286,7 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
throw new TransportException(uri, JGitText.get().cannotStoreObjects, err);
} finally {
-pw.release();
+writer.release();
}
}
