diff --git a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/PackInserterTest.java b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/PackInserterTest.java
new file mode 100644
index 000000000..1e9dd45b2
--- /dev/null
+++ b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/PackInserterTest.java
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2017, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ *   names of its contributors may be used to endorse or promote
+ *   products derived from this software without specific prior
+ *   written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.internal.storage.file;
+
+import static java.util.Comparator.comparing;
+import static java.util.stream.Collectors.toList;
+import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
+import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThan;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.function.Predicate;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.eclipse.jgit.dircache.DirCache;
+import org.eclipse.jgit.dircache.DirCacheBuilder;
+import org.eclipse.jgit.dircache.DirCacheEntry;
+import org.eclipse.jgit.junit.RepositoryTestCase;
+import org.eclipse.jgit.lib.CommitBuilder;
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.FileMode;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectLoader;
+import org.eclipse.jgit.lib.ObjectReader;
+import org.eclipse.jgit.treewalk.CanonicalTreeParser;
+import org.junit.Before;
+import org.junit.Test;
+
+@SuppressWarnings("boxing")
+public class PackInserterTest extends RepositoryTestCase {
+	@Before
+	public void emptyAtSetUp() throws Exception {
+		assertEquals(0, listPacks().size());
+		assertNoObjects();
+	}
+
+	@Test
+	public void noFlush() throws Exception {
+		try (PackInserter ins = newInserter()) {
+			ins.insert(OBJ_BLOB, Constants.encode("foo contents"));
+			// No flush.
+		}
+		assertNoObjects();
+	}
+
+	@Test
+	public void flushEmptyPack() throws Exception {
+		try (PackInserter ins = newInserter()) {
+			ins.flush();
+		}
+		assertNoObjects();
+	}
+
+	@Test
+	public void singlePack() throws Exception {
+		ObjectId blobId;
+		byte[] blob = Constants.encode("foo contents");
+		ObjectId treeId;
+		ObjectId commitId;
+		byte[] commit;
+		try (PackInserter ins = newInserter()) {
+			blobId = ins.insert(OBJ_BLOB, blob);
+
+			DirCache dc = DirCache.newInCore();
+			DirCacheBuilder b = dc.builder();
+			DirCacheEntry dce = new DirCacheEntry("foo");
+			dce.setFileMode(FileMode.REGULAR_FILE);
+			dce.setObjectId(blobId);
+			b.add(dce);
+			b.finish();
+			treeId = dc.writeTree(ins);
+
+			CommitBuilder cb = new CommitBuilder();
+			cb.setTreeId(treeId);
+			cb.setAuthor(author);
+			cb.setCommitter(committer);
+			cb.setMessage("Commit message");
+			commit = cb.toByteArray();
+			commitId = ins.insert(cb);
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		List<PackFile> packs = listPacks();
+		assertEquals(1, packs.size());
+		assertEquals(3, packs.get(0).getObjectCount());
+
+		try (ObjectReader reader = db.newObjectReader()) {
+			assertBlob(reader, blobId, blob);
+
+			CanonicalTreeParser treeParser =
+					new CanonicalTreeParser(null, reader, treeId);
+			assertEquals("foo", treeParser.getEntryPathString());
+			assertEquals(blobId, treeParser.getEntryObjectId());
+
+			ObjectLoader commitLoader = reader.open(commitId);
+			assertEquals(OBJ_COMMIT, commitLoader.getType());
+			assertArrayEquals(commit, commitLoader.getBytes());
+		}
+	}
+
+	@Test
+	public void multiplePacks() throws Exception {
+		ObjectId blobId1;
+		ObjectId blobId2;
+		byte[] blob1 = Constants.encode("blob1");
+		byte[] blob2 = Constants.encode("blob2");
+
+		try (PackInserter ins = newInserter()) {
+			blobId1 = ins.insert(OBJ_BLOB, blob1);
+			ins.flush();
+			blobId2 = ins.insert(OBJ_BLOB, blob2);
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		List<PackFile> packs = listPacks();
+		assertEquals(2, packs.size());
+		assertEquals(1, packs.get(0).getObjectCount());
+		assertEquals(1, packs.get(1).getObjectCount());
+
+		try (ObjectReader reader = db.newObjectReader()) {
+			assertBlob(reader, blobId1, blob1);
+			assertBlob(reader, blobId2, blob2);
+		}
+	}
+
+	@Test
+	public void largeBlob() throws Exception {
+		ObjectId blobId;
+		byte[] blob = newLargeBlob();
+		try (PackInserter ins = newInserter()) {
+			assertThat(blob.length, greaterThan(ins.getBufferSize()));
+			blobId =
+					ins.insert(OBJ_BLOB, blob.length, new ByteArrayInputStream(blob));
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		Collection<PackFile> packs = listPacks();
+		assertEquals(1, packs.size());
+		PackFile p = packs.iterator().next();
+		assertEquals(1, p.getObjectCount());
+
+		try (ObjectReader reader = db.newObjectReader()) {
+			assertBlob(reader, blobId, blob);
+		}
+	}
+
+	@Test
+	public void overwriteExistingPack() throws Exception {
+		ObjectId blobId;
+		byte[] blob = Constants.encode("foo contents");
+
+		try (PackInserter ins = newInserter()) {
+			blobId = ins.insert(OBJ_BLOB, blob);
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		List<PackFile> packs = listPacks();
+		assertEquals(1, packs.size());
+		PackFile pack = packs.get(0);
+		assertEquals(1, pack.getObjectCount());
+
+		String inode = getInode(pack.getPackFile());
+
+		try (PackInserter ins = newInserter()) {
+			ins.checkExisting(false);
+			assertEquals(blobId, ins.insert(OBJ_BLOB, blob));
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		packs = listPacks();
+		assertEquals(1, packs.size());
+		pack = packs.get(0);
+		assertEquals(1, pack.getObjectCount());
+
+		if (inode != null) {
+			// Old file was overwritten with new file, although objects were
+			// equivalent.
+			assertNotEquals(inode, getInode(pack.getPackFile()));
+		}
+	}
+
+	@Test
+	public void checkExisting() throws Exception {
+		ObjectId blobId;
+		byte[] blob = Constants.encode("foo contents");
+
+		try (PackInserter ins = newInserter()) {
+			blobId = ins.insert(OBJ_BLOB, blob);
+			ins.insert(OBJ_BLOB, Constants.encode("another blob"));
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		assertEquals(1, listPacks().size());
+
+		try (PackInserter ins = newInserter()) {
+			assertEquals(blobId, ins.insert(OBJ_BLOB, blob));
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		assertEquals(1, listPacks().size());
+
+		try (PackInserter ins = newInserter()) {
+			ins.checkExisting(false);
+			assertEquals(blobId, ins.insert(OBJ_BLOB, blob));
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		assertEquals(2, listPacks().size());
+
+		try (ObjectReader reader = db.newObjectReader()) {
+			assertBlob(reader, blobId, blob);
+		}
+	}
+
+	@Test
+	public void insertSmallInputStreamRespectsCheckExisting() throws Exception {
+		ObjectId blobId;
+		byte[] blob = Constants.encode("foo contents");
+		try (PackInserter ins = newInserter()) {
+			assertThat(blob.length, lessThan(ins.getBufferSize()));
+			blobId = ins.insert(OBJ_BLOB, blob);
+			ins.insert(OBJ_BLOB, Constants.encode("another blob"));
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		assertEquals(1, listPacks().size());
+
+		try (PackInserter ins = newInserter()) {
+			assertEquals(blobId,
+					ins.insert(OBJ_BLOB, blob.length, new ByteArrayInputStream(blob)));
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		assertEquals(1, listPacks().size());
+	}
+
+	@Test
+	public void insertLargeInputStreamBypassesCheckExisting() throws Exception {
+		ObjectId blobId;
+		byte[] blob = newLargeBlob();
+
+		try (PackInserter ins = newInserter()) {
+			assertThat(blob.length, greaterThan(ins.getBufferSize()));
+			blobId = ins.insert(OBJ_BLOB, blob);
+			ins.insert(OBJ_BLOB, Constants.encode("another blob"));
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		assertEquals(1, listPacks().size());
+
+		try (PackInserter ins = newInserter()) {
+			assertEquals(blobId,
+					ins.insert(OBJ_BLOB, blob.length, new ByteArrayInputStream(blob)));
+			ins.flush();
+		}
+
+		assertPacksOnly();
+		assertEquals(2, listPacks().size());
+	}
+
+	private List<PackFile> listPacks() throws Exception {
+		List<PackFile> fromOpenDb = listPacks(db);
+		List<PackFile> reopened;
+		try (FileRepository db2 = new FileRepository(db.getDirectory())) {
+			reopened = listPacks(db2);
+		}
+		assertEquals(fromOpenDb.size(), reopened.size());
+		for (int i = 0; i < fromOpenDb.size(); i++) {
+			PackFile a = fromOpenDb.get(i);
+			PackFile b = reopened.get(i);
+			assertEquals(a.getPackName(), b.getPackName());
+			assertEquals(
+					a.getPackFile().getAbsolutePath(), b.getPackFile().getAbsolutePath());
+			assertEquals(a.getObjectCount(), b.getObjectCount());
+		}
+		return fromOpenDb;
+	}
+
+	private static List<PackFile> listPacks(FileRepository db) throws Exception {
+		return db.getObjectDatabase().getPacks().stream()
+				.sorted(comparing(PackFile::getPackName)).collect(toList());
+	}
+
+	private PackInserter newInserter() {
+		return db.getObjectDatabase().newPackInserter();
+	}
+
+	private static byte[] newLargeBlob() {
+		byte[] blob = new byte[10240];
+		for (int i = 0; i < blob.length; i++) {
+			blob[i] = (byte) ('0' + (i % 10));
+		}
+		return blob;
+	}
+
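+	// fileKey() is platform-dependent; on Linux it is typically rendered as
+	// "(dev=801,ino=123456)", which the regex below parses. Where no inode
+	// is available this returns null and the overwrite check is skipped.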
+	private static String getInode(File f) throws Exception {
+		BasicFileAttributes attrs = Files.readAttributes(
+				f.toPath(), BasicFileAttributes.class);
+		Object k = attrs.fileKey();
+		if (k == null) {
+			return null;
+		}
+		Pattern p = Pattern.compile("^\\(dev=[^,]*,ino=(\\d+)\\)$");
+		Matcher m = p.matcher(k.toString());
+		return m.matches() ? m.group(1) : null;
+	}
+
+	private static void assertBlob(ObjectReader reader, ObjectId id,
+			byte[] expected) throws Exception {
+		ObjectLoader loader = reader.open(id);
+		assertEquals(OBJ_BLOB, loader.getType());
+		assertEquals(expected.length, loader.getSize());
+		assertArrayEquals(expected, loader.getBytes());
+	}
+
+	private void assertPacksOnly() throws Exception {
+		new BadFileCollector(f -> !f.endsWith(".pack") && !f.endsWith(".idx"))
+				.assertNoBadFiles(db.getObjectDatabase().getDirectory());
+	}
+
+	private void assertNoObjects() throws Exception {
+		new BadFileCollector(f -> true)
+				.assertNoBadFiles(db.getObjectDatabase().getDirectory());
+	}
+
+	private static class BadFileCollector extends SimpleFileVisitor<Path> {
+		private final Predicate<String> badName;
+		private List<String> bad;
+
+		BadFileCollector(Predicate<String> badName) {
+			this.badName = badName;
+		}
+
+		void assertNoBadFiles(File f) throws IOException {
+			bad = new ArrayList<>();
+			Files.walkFileTree(f.toPath(), this);
+			if (!bad.isEmpty()) {
+				fail("unexpected files in object directory: " + bad);
+			}
+		}
+
+		@Override
+		public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
+			String name = file.getFileName().toString();
+			if (!attrs.isDirectory() && badName.test(name)) {
+				bad.add(name);
+			}
+			return FileVisitResult.CONTINUE;
+		}
+	}
+}
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/ObjectDirectory.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/ObjectDirectory.java
index 7d351704d..153c7dd92 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/ObjectDirectory.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/ObjectDirectory.java
@@ -220,6 +220,16 @@ public class ObjectDirectory extends FileObjectDatabase {
 		return new ObjectDirectoryInserter(this, config);
 	}
 
+	/**
+	 * Create a new inserter that inserts all objects as pack files, not loose
+	 * objects.
+	 *
+	 * @return new inserter.
+	 */
+	public PackInserter newPackInserter() {
+		return new PackInserter(this);
+	}
+
 	@Override
 	public void close() {
 		unpackedObjectCache.clear();
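
(A minimal usage sketch of the new API; the caller code below is hypothetical, not part of this change, and assumes same-package access, since PackInserter itself is package-private. Nothing becomes visible in the repository until flush(); close() without flush() rolls back the temporary pack.)

	// db is an open FileRepository
	try (PackInserter ins = db.getObjectDatabase().newPackInserter()) {
		ins.checkExisting(false); // optional: skip the duplicate check
		ObjectId id = ins.insert(Constants.OBJ_BLOB,
				Constants.encode("contents"));
		ins.flush(); // writes one pack + index containing the new object
	}
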
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/PackInserter.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/PackInserter.java
new file mode 100644
index 000000000..25baad04f
--- /dev/null
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/PackInserter.java
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2017, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ *   names of its contributors may be used to endorse or promote
+ *   products derived from this software without specific prior
+ *   written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.internal.storage.file;
+
+import static java.nio.file.StandardCopyOption.ATOMIC_MOVE;
+import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
+
+import java.io.EOFException;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.text.MessageFormat;
+import java.util.Collections;
+import java.util.List;
+import java.util.zip.CRC32;
+import java.util.zip.Deflater;
+import java.util.zip.DeflaterOutputStream;
+
+import org.eclipse.jgit.internal.JGitText;
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectIdOwnerMap;
+import org.eclipse.jgit.lib.ObjectInserter;
+import org.eclipse.jgit.lib.ObjectReader;
+import org.eclipse.jgit.transport.PackParser;
+import org.eclipse.jgit.transport.PackedObjectInfo;
+import org.eclipse.jgit.util.BlockList;
+import org.eclipse.jgit.util.FileUtils;
+import org.eclipse.jgit.util.IO;
+import org.eclipse.jgit.util.NB;
+import org.eclipse.jgit.util.io.CountingOutputStream;
+import org.eclipse.jgit.util.sha1.SHA1;
+
+/**
+ * Object inserter that inserts one pack per call to {@link #flush()}, and never
+ * inserts loose objects.
+ */
+class PackInserter extends ObjectInserter {
+	/** Always produce version 2 indexes, to get CRC data. */
+	private static final int INDEX_VERSION = 2;
+
+	private final ObjectDirectory db;
+
+	private List<PackedObjectInfo> objectList;
+	private ObjectIdOwnerMap<PackedObjectInfo> objectMap;
+	private boolean rollback;
+	private boolean checkExisting = true;
+
+	private int compression = Deflater.BEST_COMPRESSION;
+	private File tmpPack;
+	private PackStream packOut;
+
+	PackInserter(ObjectDirectory db) {
+		this.db = db;
+	}
+
+	/**
+	 * @param check
+	 *            if false, will write out possibly-duplicate objects without
+	 *            first checking whether they exist in the repo; default is true.
+	 */
+	public void checkExisting(boolean check) {
+		checkExisting = check;
+	}
+
+	/**
+	 * @param compression
+	 *            compression level for zlib deflater.
+	 */
+	public void setCompressionLevel(int compression) {
+		this.compression = compression;
+	}
+
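+	// Size of the scratch buffer inherited from ObjectInserter; it is also
+	// the threshold between the in-memory insert path and the streaming
+	// path in insert(int, long, InputStream) below. Exposed for tests.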
+	int getBufferSize() {
+		return buffer().length;
+	}
+
+	@Override
+	public ObjectId insert(int type, byte[] data, int off, int len)
+			throws IOException {
+		ObjectId id = idFor(type, data, off, len);
+		if (objectMap != null && objectMap.contains(id)) {
+			return id;
+		}
+		// Ignore loose objects, which are potentially unreachable.
+		if (checkExisting && db.hasPackedObject(id)) {
+			return id;
+		}
+
+		long offset = beginObject(type, len);
+		packOut.compress.write(data, off, len);
+		packOut.compress.finish();
+		return endObject(id, offset);
+	}
+
+	@Override
+	public ObjectId insert(int type, long len, InputStream in)
+			throws IOException {
+		byte[] buf = buffer();
+		if (len <= buf.length) {
+			IO.readFully(in, buf, 0, (int) len);
+			return insert(type, buf, 0, (int) len);
+		}
+
+		long offset = beginObject(type, len);
+		SHA1 md = digest();
+		md.update(Constants.encodedTypeString(type));
+		md.update((byte) ' ');
+		md.update(Constants.encodeASCII(len));
+		md.update((byte) 0);
+
+		while (0 < len) {
+			int n = in.read(buf, 0, (int) Math.min(buf.length, len));
+			if (n <= 0) {
+				throw new EOFException();
+			}
+			md.update(buf, 0, n);
+			packOut.compress.write(buf, 0, n);
+			len -= n;
+		}
+		packOut.compress.finish();
+		return endObject(md.toObjectId(), offset);
+	}
+
+	private long beginObject(int type, long len) throws IOException {
+		if (packOut == null) {
+			beginPack();
+		}
+		long offset = packOut.getOffset();
+		packOut.beginObject(type, len);
+		return offset;
+	}
+
+	private ObjectId endObject(ObjectId id, long offset) {
+		PackedObjectInfo obj = new PackedObjectInfo(id);
+		obj.setOffset(offset);
+		obj.setCRC((int) packOut.crc32.getValue());
+		objectList.add(obj);
+		objectMap.addIfAbsent(obj);
+		return id;
+	}
+
+	private static File idxFor(File packFile) {
+		String p = packFile.getName();
+		return new File(
+				packFile.getParentFile(),
+				p.substring(0, p.lastIndexOf('.')) + ".idx"); //$NON-NLS-1$
+	}
+
+	private void beginPack() throws IOException {
+		objectList = new BlockList<>();
+		objectMap = new ObjectIdOwnerMap<>();
+
+		rollback = true;
+		tmpPack = File.createTempFile("insert_", ".pack", db.getDirectory()); //$NON-NLS-1$ //$NON-NLS-2$
+		packOut = new PackStream(tmpPack);
+
+		// Write the header as though it were a single object pack.
+		packOut.write(packOut.hdrBuf, 0, writePackHeader(packOut.hdrBuf, 1));
+	}
+
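+	// The 12-byte pack header is the 4-byte signature "PACK", a 4-byte
+	// version, and a 4-byte object count; e.g. a version 2 pack holding
+	// three objects starts 'P' 'A' 'C' 'K' 00 00 00 02 00 00 00 03.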
+	private static int writePackHeader(byte[] buf, int objectCount) {
+		System.arraycopy(Constants.PACK_SIGNATURE, 0, buf, 0, 4);
+		NB.encodeInt32(buf, 4, 2); // Always use pack version 2.
+		NB.encodeInt32(buf, 8, objectCount);
+		return 12;
+	}
+
+	@Override
+	public PackParser newPackParser(InputStream in) {
+		throw new UnsupportedOperationException();
+	}
+
+	@Override
+	public ObjectReader newReader() {
+		throw new UnsupportedOperationException();
+	}
+
+	@Override
+	public void flush() throws IOException {
+		if (tmpPack == null) {
+			return;
+		}
+
+		if (packOut == null) {
+			throw new IOException();
+		}
+
+		byte[] packHash;
+		try {
+			packHash = packOut.finishPack();
+		} finally {
+			packOut.close();
+			packOut = null;
+		}
+
+		Collections.sort(objectList);
+		File tmpIdx = idxFor(tmpPack);
+		writePackIndex(tmpIdx, packHash, objectList);
+
+		File realPack = new File(
+				new File(db.getDirectory(), "pack"), //$NON-NLS-1$
+				"pack-" + computeName(objectList).name() + ".pack"); //$NON-NLS-1$ //$NON-NLS-2$
+		db.closeAllPackHandles(realPack);
+		tmpPack.setReadOnly();
+		FileUtils.rename(tmpPack, realPack, ATOMIC_MOVE);
+
+		File realIdx = idxFor(realPack);
+		tmpIdx.setReadOnly();
+		try {
+			FileUtils.rename(tmpIdx, realIdx, ATOMIC_MOVE);
+		} catch (IOException e) {
+			File newIdx = new File(
+					realIdx.getParentFile(), realIdx.getName() + ".new"); //$NON-NLS-1$
+			try {
+				FileUtils.rename(tmpIdx, newIdx, ATOMIC_MOVE);
+			} catch (IOException e2) {
+				newIdx = tmpIdx;
+				e = e2;
+			}
+			throw new IOException(MessageFormat.format(
+					JGitText.get().panicCantRenameIndexFile, newIdx,
+					realIdx), e);
+		}
+
+		db.openPack(realPack);
+		rollback = false;
+		clear();
+	}
+
+	private static void writePackIndex(File idx, byte[] packHash,
+			List<PackedObjectInfo> list) throws IOException {
+		try (OutputStream os = new FileOutputStream(idx)) {
+			PackIndexWriter w = PackIndexWriter.createVersion(os, INDEX_VERSION);
+			w.write(list, packHash);
+		}
+	}
+
+	private ObjectId computeName(List<PackedObjectInfo> list) {
+		SHA1 md = digest().reset();
+		byte[] buf = buffer();
+		for (PackedObjectInfo otp : list) {
+			otp.copyRawTo(buf, 0);
+			md.update(buf, 0, OBJECT_ID_LENGTH);
+		}
+		return ObjectId.fromRaw(md.digest());
+	}
+
+	@Override
+	public void close() {
+		try {
+			if (packOut != null) {
+				try {
+					packOut.close();
+				} catch (IOException err) {
+					// Ignore a close failure, the pack should be removed.
+				}
+			}
+			if (rollback && tmpPack != null) {
+				try {
+					FileUtils.delete(tmpPack);
+				} catch (IOException e) {
+					// Still delete idx.
+				}
+				try {
+					FileUtils.delete(idxFor(tmpPack));
+				} catch (IOException e) {
+					// Ignore error deleting temp idx.
+				}
+				rollback = false;
+			}
+		} finally {
+			clear();
+		}
+	}
+
+	private void clear() {
+		objectList = null;
+		objectMap = null;
+		tmpPack = null;
+		packOut = null;
+	}
+
+	private class PackStream extends OutputStream {
+		final byte[] hdrBuf;
+		final CRC32 crc32;
+		final DeflaterOutputStream compress;
+
+		private final RandomAccessFile file;
+		private final CountingOutputStream out;
+		private final Deflater deflater;
+
+		PackStream(File pack) throws IOException {
+			file = new RandomAccessFile(pack, "rw"); //$NON-NLS-1$
+			out = new CountingOutputStream(new FileOutputStream(file.getFD()));
+			deflater = new Deflater(compression);
+			compress = new DeflaterOutputStream(this, deflater, 8192);
+			hdrBuf = new byte[32];
+			crc32 = new CRC32();
+		}
+
+		long getOffset() {
+			return out.getCount();
+		}
+
+		void beginObject(int objectType, long length) throws IOException {
+			crc32.reset();
+			deflater.reset();
+			write(hdrBuf, 0, encodeTypeSize(objectType, length));
+		}
+
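+		// Each object starts with a variable-length header: the first byte
+		// holds the type in bits 6-4 and the low 4 bits of the size; each
+		// following byte carries 7 more size bits, with the high bit set
+		// while more bytes follow. E.g. a blob (type 3) of 100 bytes
+		// encodes as 0xb4 0x06.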
+		private int encodeTypeSize(int type, long rawLength) {
+			long nextLength = rawLength >>> 4;
+			hdrBuf[0] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (type << 4)
+					| (rawLength & 0x0F));
+			rawLength = nextLength;
+			int n = 1;
+			while (rawLength > 0) {
+				nextLength >>>= 7;
+				hdrBuf[n++] = (byte) ((nextLength > 0 ? 0x80 : 0x00)
+						| (rawLength & 0x7F));
+				rawLength = nextLength;
+			}
+			return n;
+		}
+
+		@Override
+		public void write(final int b) throws IOException {
+			hdrBuf[0] = (byte) b;
+			write(hdrBuf, 0, 1);
+		}
+
+		@Override
+		public void write(byte[] data, int off, int len) throws IOException {
+			crc32.update(data, off, len);
+			out.write(data, off, len);
+		}
+
+		byte[] finishPack() throws IOException {
+			// Overwrite placeholder header with actual object count, then hash.
+			file.seek(0);
+			write(hdrBuf, 0, writePackHeader(hdrBuf, objectList.size()));
+
+			byte[] buf = buffer();
+			SHA1 md = digest().reset();
+			file.seek(0);
+			while (true) {
+				int r = file.read(buf);
+				if (r < 0) {
+					break;
+				}
+				md.update(buf, 0, r);
+			}
+			byte[] packHash = md.digest();
+			out.write(packHash, 0, packHash.length);
+			return packHash;
+		}
+
+		@Override
+		public void close() throws IOException {
+			deflater.end();
+			out.close();
+			file.close();
+		}
+	}
+}