FileWriter.Write now always attempts to write the entire buffer, even if it is larger than the max chunk size.
This is required to work with io.Copy, which treats a write that consumes less than the full buffer without returning an error as a failure (io.ErrShortWrite).
parent 9803f7f9f3
commit db28472ff7
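For context on the io.Copy behaviour the message refers to: io.Copy reports io.ErrShortWrite whenever a Write call returns n less than the length it was given together with a nil error. A minimal, self-contained sketch (standard library only, nothing from this repo) of a writer that truncates the way the old Write did:

package main

import (
	"fmt"
	"io"
	"strings"
)

// truncatingWriter mimics the old behaviour: it silently accepts at most
// max bytes per call and reports no error for the bytes it dropped.
type truncatingWriter struct{ max int }

func (w truncatingWriter) Write(p []byte) (int, error) {
	if len(p) > w.max {
		p = p[:w.max]
	}
	return len(p), nil // short write, nil error
}

func main() {
	_, err := io.Copy(truncatingWriter{max: 4}, strings.NewReader("hello world"))
	fmt.Println(err) // io.Copy detects the short write and prints "short write"
}

Looping until the whole buffer has been sent, as the commit below does, is the usual way to satisfy that contract.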
@@ -43,21 +43,31 @@ func encodePathAndMode(path string, mode os.FileMode) []byte {
 
 // Write writes the min of (len(buf), 64k).
 func (w *syncFileWriter) Write(buf []byte) (n int, err error) {
-	// Writes < 64k have a one-to-one mapping to chunks.
-	// If buffer is larger than the max, we'll return the max size and leave it up to the
-	// caller to handle correctly.
-	if len(buf) > wire.SyncMaxChunkSize {
-		buf = buf[:wire.SyncMaxChunkSize]
-	}
-
-	if err := w.sender.SendOctetString(wire.StatusSyncData); err != nil {
-		return 0, err
-	}
-	if err := w.sender.SendBytes(buf); err != nil {
-		return 0, err
-	}
-
-	return len(buf), nil
+	written := 0
+
+	// If buf > 64k we'll have to send multiple chunks.
+	// TODO Refactor this into something that can coalesce smaller writes into a single chunk.
+	for len(buf) > 0 {
+		// Writes < 64k have a one-to-one mapping to chunks.
+		// If the remaining buffer is larger than the max chunk size, send just the
+		// first max-size chunk on this iteration and loop around for the rest.
+		partialBuf := buf
+		if len(partialBuf) > wire.SyncMaxChunkSize {
+			partialBuf = partialBuf[:wire.SyncMaxChunkSize]
+		}
+
+		if err := w.sender.SendOctetString(wire.StatusSyncData); err != nil {
+			return written, err
+		}
+		if err := w.sender.SendBytes(partialBuf); err != nil {
+			return written, err
+		}
+
+		written += len(partialBuf)
+		buf = buf[len(partialBuf):]
+	}
+
+	return written, nil
 }
 
 func (w *syncFileWriter) Close() error {
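The new loop is a standard chunked-write pattern: slice off at most one max-size chunk per iteration, send it, advance the buffer, and report the running total only once everything has been sent. A standalone sketch of the same idea against a plain io.Writer (writeChunked and the 65536 limit are illustrative stand-ins, not this repo's wire.SyncSender API):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// writeChunked writes buf to w in pieces of at most maxChunk bytes. It only
// reports success once every byte has been handed off, which is the io.Writer
// contract that io.Copy depends on.
func writeChunked(w io.Writer, buf []byte, maxChunk int) (written int, err error) {
	for len(buf) > 0 {
		chunk := buf
		if len(chunk) > maxChunk {
			chunk = chunk[:maxChunk]
		}
		n, err := w.Write(chunk)
		written += n
		if err != nil {
			return written, err
		}
		if n < len(chunk) {
			return written, io.ErrShortWrite
		}
		buf = buf[n:]
	}
	return written, nil
}

func main() {
	var out bytes.Buffer
	n, err := writeChunked(&out, make([]byte, 65536+1), 65536)
	fmt.Println(n, err) // 65537 <nil>
}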
@@ -42,18 +42,26 @@ func TestFileWriterWriteLargeChunk(t *testing.T) {
 	var buf bytes.Buffer
 	writer := newSyncFileWriter(wire.NewSyncSender(&buf), MtimeOfClose)
 
+	// Send just enough data to get 2 chunks.
 	data := make([]byte, wire.SyncMaxChunkSize+1)
 	n, err := writer.Write(data)
 
 	assert.NoError(t, err)
-	assert.Equal(t, wire.SyncMaxChunkSize, n)
-	assert.Equal(t, 8 + wire.SyncMaxChunkSize, buf.Len())
+	assert.Equal(t, wire.SyncMaxChunkSize+1, n)
+	assert.Equal(t, 8 + 8 + wire.SyncMaxChunkSize+1, buf.Len())
 
-	expectedHeader := []byte("DATA0000")
+	// First header.
+	chunk := buf.Bytes()[:8+wire.SyncMaxChunkSize]
+	expectedHeader := []byte("DATA----")
 	binary.LittleEndian.PutUint32(expectedHeader[4:], wire.SyncMaxChunkSize)
-	assert.Equal(t, expectedHeader, buf.Bytes()[:8])
+	assert.Equal(t, expectedHeader, chunk[:8])
+	assert.Equal(t, data[:wire.SyncMaxChunkSize], chunk[8:])
 
-	assert.Equal(t, string(data[:wire.SyncMaxChunkSize]), buf.String()[8:])
+	// Second header.
+	chunk = buf.Bytes()[wire.SyncMaxChunkSize+8 : wire.SyncMaxChunkSize+8+1]
+	expectedHeader = []byte("DATA\000\000\000\000")
+	binary.LittleEndian.PutUint32(expectedHeader[4:], 1)
+	assert.Equal(t, expectedHeader, chunk[:8])
 }
 
 func TestFileWriterCloseEmpty(t *testing.T) {
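As the updated assertions show, each chunk on the wire is framed as an 8-byte header, the literal "DATA" followed by the chunk length as a little-endian uint32, and then the payload; a write of wire.SyncMaxChunkSize+1 bytes therefore produces two frames, 8 + SyncMaxChunkSize bytes and 8 + 1 bytes. A small sketch of building such a header (syncDataHeader is a hypothetical helper inferred from the test's assertions, not a function in this repo, and 65536 is the 64k limit the doc comment mentions):

package main

import (
	"encoding/binary"
	"fmt"
)

// syncDataHeader builds the 8-byte chunk header the test expects: the literal
// "DATA" followed by the payload length as a little-endian uint32.
func syncDataHeader(payloadLen int) []byte {
	header := []byte("DATA----") // placeholder length bytes, overwritten below
	binary.LittleEndian.PutUint32(header[4:], uint32(payloadLen))
	return header
}

func main() {
	fmt.Printf("% x\n", syncDataHeader(65536)) // 44 41 54 41 00 00 01 00
	fmt.Printf("% x\n", syncDataHeader(1))     // 44 41 54 41 01 00 00 00
}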