Commit 13f08f3e authored by Kirill Smelkov's avatar Kirill Smelkov

.

parent 8d830b20
Simplified BSD License
Copyright (c) 2012, Google Inc.
Copyright (c) 2016, Datadog <info@datadoghq.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Pulled from https://github.com/youtube/vitess 229422035ca0c716ad0c1397ea1351fe62b0d35a
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package czlib
// NOTE: the routines defined in this file are used for verification in
// czlib_test.go, but you cannot use cgo in test files, so they are
// defined here despite not being exposed.
// #cgo pkg-config: zlib
/*
#include "zlib.h"
*/
import "C"
import (
"hash"
"unsafe"
)
// adler32Hash holds a running Adler-32 state maintained by C zlib.
// It implements hash.Hash32 (constructed via newAdler32).
type adler32Hash struct {
	adler C.uLong // current checksum value, as returned by C.adler32
}
// newAdler32 returns a hash.Hash32 that computes Adler-32 via C zlib.
//
// an empty buffer has an adler32 of '1' by default, so start with that
// (the go hash/adler32 does the same)
func newAdler32() hash.Hash32 {
	a := &adler32Hash{}
	a.Reset()
	return a
}
// Write implements an io.Writer interface. It folds p into the running
// checksum and never fails (the error is always nil).
func (a *adler32Hash) Write(p []byte) (n int, err error) {
	// skip empty writes so we never take the address of p[0] on an
	// empty slice (which would panic)
	if len(p) > 0 {
		a.adler = C.adler32(a.adler, (*C.Bytef)(unsafe.Pointer(&p[0])), (C.uInt)(len(p)))
	}
	return len(p), nil
}
// Sum implements a hash.Hash interface: it appends the current
// checksum to b in big-endian byte order and returns the result.
// The hash state itself is not modified.
func (a *adler32Hash) Sum(b []byte) []byte {
	v := a.Sum32()
	return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}
// Reset resets the hash to default value
func (a *adler32Hash) Reset() {
	// adler32(0, NULL, 0) asks zlib for the required initial value (1)
	a.adler = C.adler32(0, (*C.Bytef)(unsafe.Pointer(nil)), 0)
}
// Size returns the (fixed) size of the hash: an Adler-32 checksum is
// always four bytes.
func (a *adler32Hash) Size() int { return 4 }
// BlockSize returns the (fixed) block size; writes of any length are
// accepted, so the minimum of one byte is reported.
func (a *adler32Hash) BlockSize() int { return 1 }
// Sum32 implements a hash.Hash32 interface, returning the current
// checksum narrowed to 32 bits (Adler-32 values always fit).
func (a *adler32Hash) Sum32() uint32 {
	return uint32(a.adler)
}
// helper method for partial checksums. From the zlib.h header:
//
// Combine two Adler-32 checksums into one. For two sequences of bytes, seq1
// and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for
// each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of
// seq1 and seq2 concatenated, requiring only adler1, adler2, and len2.
func adler32Combine(adler1, adler2 uint32, len2 int) uint32 {
	// len2 is converted to zlib's off_t type; callers pass byte counts
	return uint32(C.adler32_combine(C.uLong(adler1), C.uLong(adler2), C.z_off_t(len2)))
}
// Pulled from https://github.com/youtube/vitess 229422035ca0c716ad0c1397ea1351fe62b0d35a
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package czlib
// NOTE: the routines defined in this file are used for verification in
// czlib_test.go, but you cannot use cgo in test files, so they are
// defined here despite not being exposed.
/*
#include "zlib.h"
*/
import "C"
import (
"hash"
"unsafe"
)
// crc32Hash holds a running CRC-32 state maintained by C zlib.
// It implements hash.Hash32 (constructed via newCrc32).
type crc32Hash struct {
	crc C.uLong // current checksum value, as returned by C.crc32
}
// newCrc32 returns a hash.Hash32 that computes CRC-32 via C zlib.
//
// an empty buffer has a crc32 of '0' by default, so start with that
// (the go hash/crc32 does the same)
func newCrc32() hash.Hash32 {
	c := &crc32Hash{}
	c.Reset()
	return c
}
// Write implements an io.Writer interface. It folds p into the running
// checksum and never fails (the error is always nil).
func (a *crc32Hash) Write(p []byte) (n int, err error) {
	// skip empty writes so we never take the address of p[0] on an
	// empty slice (which would panic)
	if len(p) > 0 {
		a.crc = C.crc32(a.crc, (*C.Bytef)(unsafe.Pointer(&p[0])), (C.uInt)(len(p)))
	}
	return len(p), nil
}
// Sum implements a hash.Hash interface: it appends the current
// checksum to b in big-endian byte order and returns the result.
// The hash state itself is not modified.
func (a *crc32Hash) Sum(b []byte) []byte {
	v := a.Sum32()
	return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}
// Reset resets the hash to default value
func (a *crc32Hash) Reset() {
	// crc32(0, NULL, 0) asks zlib for the required initial value (0)
	a.crc = C.crc32(0, (*C.Bytef)(unsafe.Pointer(nil)), 0)
}
// Size returns the (fixed) size of the hash: a CRC-32 checksum is
// always four bytes.
func (a *crc32Hash) Size() int { return 4 }
// BlockSize returns the (fixed) block size of the hash; writes of any
// length are accepted, so the minimum of one byte is reported.
func (a *crc32Hash) BlockSize() int { return 1 }
// Sum32 implements a hash.Hash32 interface, returning the current
// checksum narrowed to 32 bits (CRC-32 values always fit).
func (a *crc32Hash) Sum32() uint32 {
	return uint32(a.crc)
}
// helper method for partial checksums. From the zlib.h header:
//
// Combine two CRC-32 checksums into one. For two sequences of bytes, seq1
// and seq2 with lengths len1 and len2, CRC-32 checksums were calculated for
// each, crc1 and crc2. crc32_combine() returns the CRC-32 checksum of
// seq1 and seq2 concatenated, requiring only crc1, crc2, and len2.
func crc32Combine(crc1, crc2 uint32, len2 int) uint32 {
	// len2 is converted to zlib's off_t type; callers pass byte counts
	return uint32(C.crc32_combine(C.uLong(crc1), C.uLong(crc2), C.z_off_t(len2)))
}
// Copyright 2016, Datadog Inc. All rights reserved.
package czlib
import (
"compress/flate"
"compress/zlib"
)
// Constants copied from the flate package, so that code that imports czlib
// does not also have to import "compress/flate". The numeric values match
// the levels zlib's deflateInit accepts.
const (
	NoCompression      = flate.NoCompression
	BestSpeed          = flate.BestSpeed
	BestCompression    = flate.BestCompression
	DefaultCompression = flate.DefaultCompression
)
// Errors re-exported from compress/zlib so callers do not need to
// import that package to compare against them.
var (
	// ErrChecksum is returned when reading ZLIB data that has an invalid checksum.
	ErrChecksum = zlib.ErrChecksum
	// ErrDictionary is returned when reading ZLIB data that has an invalid dictionary.
	ErrDictionary = zlib.ErrDictionary
	// ErrHeader is returned when reading ZLIB data that has an invalid header.
	ErrHeader = zlib.ErrHeader
)
This diff is collapsed.
// Copyright 2013, Datadog Inc. All rights reserved.
package czlib
import (
"errors"
"unsafe"
)
/*
#cgo LDFLAGS: -lz
#include "fastzlib.h"
#include <stdlib.h>
*/
import "C"
// An UnsafeByte is a []byte whose backing array has been allocated in C and
// thus is not subject to the Go garbage collector. The Unsafe versions of
// Compress and Decompress return this in order to prevent copying the unsafe
// memory into collected memory. Callers own the memory and must release it
// with Free exactly once.
type UnsafeByte []byte
// NewUnsafeByte creates a []byte from the unsafe pointer without a copy,
// using the method outlined in this mailing list post:
// https://groups.google.com/forum/#!topic/golang-nuts/KyXR0fDp0HA
// but amended to use the three-index slices from go1.2 to set the capacity
// of b correctly:
// https://tip.golang.org/doc/go1.2#three_index
// This means this code only works in go1.2+.
//
// This doesn't copy the underlying array; the three-index slice
// expression sets both the len and cap of b to length.
func NewUnsafeByte(p *C.char, length int) UnsafeByte {
	var b UnsafeByte
	b = UnsafeByte((*[1<<31 - 1]byte)(unsafe.Pointer(p))[:length:length])
	return b
}
// Free releases the C-allocated backing array; doing this twice would
// be bad. Freeing an empty UnsafeByte (such as the zero value returned
// on error paths) is a safe no-op instead of a panic.
func (b UnsafeByte) Free() {
	if len(b) == 0 {
		// nothing was allocated; &b[0] would panic
		return
	}
	C.free(unsafe.Pointer(&b[0]))
}
// Compress returns the input compressed using zlib, or an error if encountered.
// The result is an ordinary Go []byte (copied out of C memory); the C buffer
// is freed before returning.
func Compress(input []byte) ([]byte, error) {
	var cInput *C.char
	// avoid taking the address of element 0 of an empty slice
	if len(input) != 0 {
		cInput = (*C.char)(unsafe.Pointer(&input[0]))
	}
	ret := C.c_compress2(cInput, C.uint(len(input)))
	// if there was an error compressing, return it and free the original message
	if ret.err != nil {
		msg := C.GoString((*C.char)(ret.err))
		C.free(unsafe.Pointer(ret.err))
		return []byte{}, errors.New(msg)
	}
	// NOTE: this creates a copy of the return *char as a Go []byte.
	// FIXME: uint -> int conversion here is dangerous
	b := C.GoBytes(unsafe.Pointer(ret.str), C.int(ret.len))
	C.free(unsafe.Pointer(ret.str))
	return b, nil
}
// Decompress returns the input decompressed using zlib, or an error if encountered.
// The result is an ordinary Go []byte (copied out of C memory); the C buffer
// is freed before returning.
func Decompress(input []byte) ([]byte, error) {
	var cInput *C.char
	// avoid taking the address of element 0 of an empty slice
	if len(input) != 0 {
		cInput = (*C.char)(unsafe.Pointer(&input[0]))
	}
	// send the input bytes without copying them
	ret := C.c_decompress(cInput, C.uint(len(input)))
	// if there was an error decompressing, return it and free the original message
	if ret.err != nil {
		msg := C.GoString((*C.char)(ret.err))
		C.free(unsafe.Pointer(ret.err))
		return []byte{}, errors.New(msg)
	}
	// NOTE: this creates a copy of the return *char as a Go []byte.
	// FIXME: uint -> int conversion here is dangerous
	b := C.GoBytes(unsafe.Pointer(ret.str), C.int(ret.len))
	C.free(unsafe.Pointer(ret.str))
	return b, nil
}
// UnsafeDecompress unzips input into an UnsafeByte without copying the result
// malloced in C. The UnsafeByte returned can be used as a normal []byte but
// must be manually free'd w/ UnsafeByte.Free()
func UnsafeDecompress(input []byte) (UnsafeByte, error) {
	// Guard empty input the same way Decompress does: taking &input[0]
	// of an empty slice would panic.
	var cInput *C.char
	if len(input) != 0 {
		cInput = (*C.char)(unsafe.Pointer(&input[0]))
	}
	ret := C.c_decompress(cInput, C.uint(len(input)))
	// if there was an error decompressing, return it and free the error message
	if ret.err != nil {
		msg := C.GoString((*C.char)(ret.err))
		C.free(unsafe.Pointer(ret.err))
		return UnsafeByte{}, errors.New(msg)
	}
	// wrap the C buffer directly; ownership passes to the caller
	b := NewUnsafeByte((*C.char)(ret.str), int(ret.len))
	return b, nil
}
// UnsafeCompress zips input into an UnsafeByte without copying the result
// malloced in C. The UnsafeByte returned can be used as a normal []byte but must
// be manually free'd w/ UnsafeByte.Free()
//
// NOTE(review): this calls c_compress while Compress calls c_compress2;
// presumably intentional, but worth confirming against fastzlib.c.
func UnsafeCompress(input []byte) (UnsafeByte, error) {
	// Guard empty input the same way Compress does: taking &input[0]
	// of an empty slice would panic.
	var cInput *C.char
	if len(input) != 0 {
		cInput = (*C.char)(unsafe.Pointer(&input[0]))
	}
	ret := C.c_compress(cInput, C.uint(len(input)))
	// if there was an error compressing, return it and free the error message
	if ret.err != nil {
		msg := C.GoString((*C.char)(ret.err))
		C.free(unsafe.Pointer(ret.err))
		return UnsafeByte{}, errors.New(msg)
	}
	// wrap the C buffer directly; ownership passes to the caller
	b := NewUnsafeByte((*C.char)(ret.str), int(ret.len))
	return b, nil
}
/* Local alias so the prototypes below read like the Go code that calls them. */
typedef unsigned int uint;
/* simulate the Go return type ([]byte, error) so that the Go function
 * can allocate the right amt of memory to copy the str if required or
 * report errors directly from the C lib.
 */
typedef struct {
	char *str; /* malloc'ed result buffer; the Go caller must free() it */
	uint len;  /* number of valid bytes in str */
	char *err; /* malloc'ed error message, or NULL on success */
} ByteArray;
ByteArray c_decompress(char *input, uint length);
ByteArray c_compress(char *input, uint length);
ByteArray c_compress2(char *input, uint length);
// Pulled from https://github.com/youtube/vitess 229422035ca0c716ad0c1397ea1351fe62b0d35a
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package czlib
import "io"
// reader is the zlib-inflating io.ReadCloser returned by NewReader.
//
// err starts out as nil
// we will call inflateEnd when we set err to a value:
// - whatever error is returned by the underlying reader
// - io.EOF if Close was called
type reader struct {
	r      io.Reader // compressed source
	in     []byte    // buffer for compressed bytes read from r
	strm   zstream   // zlib inflate state
	err    error     // sticky error; non-nil means strm is already torn down
	skipIn bool      // true when strm may still have pending output to drain
}
// NewReader creates a new io.ReadCloser. Reads from the returned io.ReadCloser
// read and decompress data from r. The implementation buffers input and may read
// more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser when done.
func NewReader(r io.Reader) (io.ReadCloser, error) {
	return NewReaderBuffer(r, DEFAULT_COMPRESSED_BUFFER_SIZE)
}
// NewReaderBuffer has the same behavior as NewReader but the user can provide
// a custom buffer size (in bytes) for the compressed-input buffer.
func NewReaderBuffer(r io.Reader, bufferSize int) (io.ReadCloser, error) {
	z := &reader{r: r, in: make([]byte, bufferSize)}
	// initialize the zlib inflate state; on failure no reader is returned
	if err := z.strm.inflateInit(); err != nil {
		return nil, err
	}
	return z, nil
}
// Read implements io.Reader: it decompresses into p, refilling the
// internal input buffer from the underlying reader as needed. Once an
// error (including io.EOF) is recorded, the inflate state is torn down
// and the error is returned on every subsequent call.
func (z *reader) Read(p []byte) (int, error) {
	if z.err != nil {
		return 0, z.err
	}
	if len(p) == 0 {
		return 0, nil
	}
	// read and deflate until the output buffer is full
	z.strm.setOutBuf(p, len(p))
	for {
		// if we have no data to inflate, read more
		if !z.skipIn && z.strm.availIn() == 0 {
			var n int
			n, z.err = z.r.Read(z.in)
			// If we got data and EOF, pretend we didn't get the
			// EOF. That way we will return the right values
			// upstream. Note this will trigger another read
			// later on, that should return (0, EOF).
			if n > 0 && z.err == io.EOF {
				z.err = nil
			}
			// FIXME(alainjobart) this code is not compliant with
			// the Reader interface. We should process all the
			// data we got from the reader, and then return the
			// error, whatever it is.
			if (z.err != nil && z.err != io.EOF) || (n == 0 && z.err == io.EOF) {
				z.strm.inflateEnd()
				return 0, z.err
			}
			z.strm.setInBuf(z.in, n)
		} else {
			z.skipIn = false
		}
		// inflate some
		ret, err := z.strm.inflate(zNoFlush)
		if err != nil {
			z.err = err
			z.strm.inflateEnd()
			return 0, z.err
		}
		// if we read something, we're good
		have := len(p) - z.strm.availOut()
		if have > 0 {
			// if zlib filled the whole output buffer with status
			// Z_OK, it may still hold pending output: make the next
			// Read inflate again before refilling the input buffer
			z.skipIn = ret == Z_OK && z.strm.availOut() == 0
			return have, z.err
		}
	}
}
// Close closes the Reader. It does not close the underlying io.Reader.
// The first successful call tears down the inflate state and marks the
// reader as finished; later calls are no-ops, and a previously recorded
// read error is returned instead.
func (z *reader) Close() error {
	switch z.err {
	case nil:
		// normal shutdown: release zlib state and mark as closed
		z.strm.inflateEnd()
		z.err = io.EOF
		return nil
	case io.EOF:
		// already closed (or stream fully consumed): nothing to do
		return nil
	default:
		return z.err
	}
}
// Pulled from https://github.com/youtube/vitess 229422035ca0c716ad0c1397ea1351fe62b0d35a
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package czlib
import (
"fmt"
"io"
)
// Allowed flush values, mirroring the Z_* flush constants in zlib.h.
const (
	Z_NO_FLUSH      = 0
	Z_PARTIAL_FLUSH = 1
	Z_SYNC_FLUSH    = 2
	Z_FULL_FLUSH    = 3
	Z_FINISH        = 4
	Z_BLOCK         = 5
	Z_TREES         = 6
)
// Return codes, mirroring the Z_* status constants in zlib.h.
// Values >= 0 are normal events; values < 0 are errors.
const (
	Z_OK            = 0
	Z_STREAM_END    = 1
	Z_NEED_DICT     = 2
	Z_ERRNO         = -1
	Z_STREAM_ERROR  = -2
	Z_DATA_ERROR    = -3
	Z_MEM_ERROR     = -4
	Z_BUF_ERROR     = -5
	Z_VERSION_ERROR = -6
)
// our default buffer size
// most go io functions use 32KB as buffer size, so 32KB
// works well here for compressed data buffer
const (
	DEFAULT_COMPRESSED_BUFFER_SIZE = 32 * 1024
)
// Writer implements a io.WriteCloser that deflates data before handing
// it to the wrapped writer.
// we will call deflateEnd when we set err to a value:
// - whatever error is returned by the underlying writer
// - io.EOF if Close was called
type Writer struct {
	w    io.Writer // destination for compressed bytes
	out  []byte    // buffer for deflate output
	strm zstream   // zlib deflate state
	err  error     // sticky error; non-nil means strm is already torn down
}
// NewWriter returns a new zlib writer that writes to the underlying writer
// using DefaultCompression and the default buffer size.
// NOTE(review): the error from NewWriterLevelBuffer is discarded here —
// presumably deflateInit cannot fail for DefaultCompression; verify.
func NewWriter(w io.Writer) *Writer {
	z, _ := NewWriterLevelBuffer(w, DefaultCompression, DEFAULT_COMPRESSED_BUFFER_SIZE)
	return z
}
// NewWriterLevel let the user provide a compression level value
// (one of the level constants above) while keeping the default buffer size.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
	return NewWriterLevelBuffer(w, level, DEFAULT_COMPRESSED_BUFFER_SIZE)
}
// NewWriterLevelBuffer let the user provide compression level and buffer size
// (in bytes) values. It returns an error if zlib's deflateInit fails.
func NewWriterLevelBuffer(w io.Writer, level, bufferSize int) (*Writer, error) {
	z := &Writer{w: w, out: make([]byte, bufferSize)}
	if err := z.strm.deflateInit(level); err != nil {
		return nil, err
	}
	return z, nil
}
// this is the main function: it advances the write with either
// new data or something else to do, like a flush. It records any
// underlying-writer error in z.err (tearing down the deflate state)
// and returns 0 in that case; otherwise it returns len(p).
func (z *Writer) write(p []byte, flush int) int {
	if len(p) == 0 {
		z.strm.setInBuf(nil, 0)
	} else {
		z.strm.setInBuf(p, len(p))
	}
	// we loop until we don't get a full output buffer
	// each loop completely writes the output buffer to the underlying
	// writer
	for {
		// deflate one buffer
		z.strm.setOutBuf(z.out, len(z.out))
		z.strm.deflate(flush)
		// Write everything deflate produced. The end bound stays
		// fixed while 'from' advances past what the writer accepted.
		// (The previous code sliced z.out[from:have] while shrinking
		// 'have', which would drop bytes on a short write; per the
		// io.Writer contract a short write comes with an error, but
		// the bounds are now correct either way.)
		from := 0
		end := len(z.out) - z.strm.availOut()
		for from < end {
			var n int
			n, z.err = z.w.Write(z.out[from:end])
			if z.err != nil {
				z.strm.deflateEnd()
				return 0
			}
			from += n
		}
		// we stop trying if we get a partial response
		if z.strm.availOut() != 0 {
			break
		}
	}
	// the library guarantees this
	if z.strm.availIn() != 0 {
		panic(fmt.Errorf("cgzip: Unexpected error (2)"))
	}
	return len(p)
}
// Write implements the io.Writer interface: it deflates p into the
// underlying writer. Once an error has been recorded it is returned
// on every subsequent call.
func (z *Writer) Write(p []byte) (n int, err error) {
	if z.err != nil {
		return 0, z.err
	}
	n = z.write(p, Z_NO_FLUSH)
	// z.err may have been set by write()
	return n, z.err
}
// Flush let the user flush the zlib buffer to the underlying writer buffer
// (a zlib sync flush: all pending input is compressed and emitted).
func (z *Writer) Flush() error {
	if z.err != nil {
		return z.err
	}
	// no new data, just a sync-flush request
	z.write(nil, Z_SYNC_FLUSH)
	return z.err
}
// Close closes the zlib buffer but does not close the wrapped io.Writer originally
// passed to NewWriterX. It finishes the compressed stream, tears down the
// deflate state, and marks the Writer as closed (err = io.EOF).
func (z *Writer) Close() error {
	if z.err != nil {
		return z.err
	}
	// flush all remaining output and write the stream trailer
	z.write(nil, Z_FINISH)
	if z.err != nil {
		return z.err
	}
	z.strm.deflateEnd()
	z.err = io.EOF
	return nil
}
// Pulled from https://github.com/youtube/vitess 229422035ca0c716ad0c1397ea1351fe62b0d35a
// Copyright 2015, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package czlib
// See http://www.zlib.net/zlib_how.html for more information on this
/*
#cgo CFLAGS: -Werror=implicit
#cgo pkg-config: zlib
#include "zlib.h"
// inflateInit is a macro, so using a wrapper function
int zstream_inflate_init(char *strm) {
((z_stream*)strm)->zalloc = Z_NULL;
((z_stream*)strm)->zfree = Z_NULL;
((z_stream*)strm)->opaque = Z_NULL;
((z_stream*)strm)->avail_in = 0;
((z_stream*)strm)->next_in = Z_NULL;
return inflateInit((z_stream*)strm);
}
// deflateInit is a macro, so using a wrapper function
int zstream_deflate_init(char *strm, int level) {
((z_stream*)strm)->zalloc = Z_NULL;
((z_stream*)strm)->zfree = Z_NULL;
((z_stream*)strm)->opaque = Z_NULL;
return deflateInit((z_stream*)strm, level);
}
unsigned int zstream_avail_in(char *strm) {
return ((z_stream*)strm)->avail_in;
}
unsigned int zstream_avail_out(char *strm) {
return ((z_stream*)strm)->avail_out;
}
char* zstream_msg(char *strm) {
return ((z_stream*)strm)->msg;
}
void zstream_set_in_buf(char *strm, void *buf, unsigned int len) {
((z_stream*)strm)->next_in = (Bytef*)buf;
((z_stream*)strm)->avail_in = len;
}
void zstream_set_out_buf(char *strm, void *buf, unsigned int len) {
((z_stream*)strm)->next_out = (Bytef*)buf;
((z_stream*)strm)->avail_out = len;
}
int zstream_inflate(char *strm, int flag) {
return inflate((z_stream*)strm, flag);
}
int zstream_deflate(char *strm, int flag) {
return deflate((z_stream*)strm, flag);
}
void zstream_inflate_end(char *strm) {
inflateEnd((z_stream*)strm);
}
void zstream_deflate_end(char *strm) {
deflateEnd((z_stream*)strm);
}
*/
import "C"
import (
"fmt"
"unsafe"
)
const (
	// zNoFlush mirrors zlib's Z_NO_FLUSH for the cgo inflate calls.
	zNoFlush = C.Z_NO_FLUSH
)
// z_stream is a buffer that's big enough to fit a C.z_stream.
// This lets us allocate a C.z_stream within Go, while keeping the contents
// opaque to the Go GC. Otherwise, the GC would look inside and complain that
// the pointers are invalid, since they point to objects allocated by C code.
// All accessor methods pass &strm[0] to the C wrapper functions above.
type zstream [unsafe.Sizeof(C.z_stream{})]C.char
// inflateInit initializes the stream for decompression.
// It must be called before inflate; on failure the zlib error message
// is included in the returned error.
func (strm *zstream) inflateInit() error {
	result := C.zstream_inflate_init(&strm[0])
	if result != Z_OK {
		return fmt.Errorf("cgzip: failed to initialize inflate (%v): %v", result, strm.msg())
	}
	return nil
}
// deflateInit initializes the stream for compression at the given level.
// It must be called before deflate; on failure the zlib error message
// is included in the returned error.
func (strm *zstream) deflateInit(level int) error {
	result := C.zstream_deflate_init(&strm[0], C.int(level))
	if result != Z_OK {
		return fmt.Errorf("cgzip: failed to initialize deflate (%v): %v", result, strm.msg())
	}
	return nil
}
// inflateEnd releases the decompression state held by zlib.
func (strm *zstream) inflateEnd() {
	C.zstream_inflate_end(&strm[0])
}
// deflateEnd releases the compression state held by zlib.
func (strm *zstream) deflateEnd() {
	C.zstream_deflate_end(&strm[0])
}
// availIn returns the number of unconsumed input bytes (avail_in).
func (strm *zstream) availIn() int {
	return int(C.zstream_avail_in(&strm[0]))
}
// availOut returns the remaining free space in the output buffer (avail_out).
func (strm *zstream) availOut() int {
	return int(C.zstream_avail_out(&strm[0]))
}
// msg returns zlib's last error message for this stream (may be empty).
func (strm *zstream) msg() string {
	return C.GoString(C.zstream_msg(&strm[0]))
}
// setInBuf points zlib's next_in/avail_in at the first size bytes of
// buf. A nil buf installs a NULL input pointer.
func (strm *zstream) setInBuf(buf []byte, size int) {
	var p unsafe.Pointer
	if buf != nil {
		p = unsafe.Pointer(&buf[0])
	}
	C.zstream_set_in_buf(&strm[0], p, C.uint(size))
}
// setOutBuf points zlib's next_out/avail_out at the first size bytes of
// buf. A nil buf installs a NULL output pointer.
func (strm *zstream) setOutBuf(buf []byte, size int) {
	var p unsafe.Pointer
	if buf != nil {
		p = unsafe.Pointer(&buf[0])
	}
	C.zstream_set_out_buf(&strm[0], p, C.uint(size))
}
// inflate runs one zlib inflate step with the given flush flag and
// returns the zlib status code. Z_NEED_DICT, Z_DATA_ERROR and
// Z_MEM_ERROR are turned into Go errors; other codes (including
// Z_BUF_ERROR) are returned to the caller to interpret.
func (strm *zstream) inflate(flag int) (int, error) {
	ret := C.zstream_inflate(&strm[0], C.int(flag))
	switch ret {
	case Z_NEED_DICT:
		// we don't support preset dictionaries: report as data error
		ret = Z_DATA_ERROR
		fallthrough
	case Z_DATA_ERROR, Z_MEM_ERROR:
		return int(ret), fmt.Errorf("cgzip: failed to inflate (%v): %v", ret, strm.msg())
	}
	return int(ret), nil
}
// deflate runs one zlib deflate step with the given flush flag.
// Z_STREAM_ERROR indicates corrupted stream state (a programmer bug),
// so it panics; every other status code is a normal outcome.
func (strm *zstream) deflate(flag int) {
	ret := C.zstream_deflate(&strm[0], C.int(flag))
	if ret == Z_STREAM_ERROR {
		// all the other error cases are normal,
		// and this should never happen
		panic(fmt.Errorf("cgzip: Unexpected error (1)"))
	}
}
Copyright (c) 2014-2015, Philip Hofer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
# fwd
import "github.com/philhofer/fwd"
The `fwd` package provides a buffered reader
and writer. Each has methods that help improve
the encoding/decoding performance of some binary
protocols.
The `fwd.Writer` and `fwd.Reader` type provide similar
functionality to their counterparts in `bufio`, plus
a few extra utility methods that simplify read-ahead
and write-ahead. I wrote this package to improve serialization
performance for <a href="http://github.com/tinylib/msgp">http://github.com/tinylib/msgp</a>,
where it provided about a 2x speedup over `bufio` for certain
workloads. However, care must be taken to understand the semantics of the
extra methods provided by this package, as they allow
the user to access and manipulate the buffer memory
directly.
The extra methods for `fwd.Reader` are `Peek`, `Skip`
and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
will re-allocate the read buffer in order to accommodate arbitrarily
large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
in the stream, and uses the `io.Seeker` interface if the underlying
stream implements it. `(*fwd.Reader).Next` returns a slice pointing
to the next `n` bytes in the read buffer (like `Peek`), but also
increments the read position. This allows users to process streams
in arbitrary block sizes without having to manage appropriately-sized
slices. Additionally, obviating the need to copy the data from the
buffer to another location in memory can improve performance dramatically
in CPU-bound applications.
`fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which
returns a slice pointing to the next `n` bytes of the writer, and increments
the write position by the length of the returned slice. This allows users
to write directly to the end of the buffer.
## Constants
``` go
const (
// DefaultReaderSize is the default size of the read buffer
DefaultReaderSize = 2048
)
```
``` go
const (
// DefaultWriterSize is the
// default write buffer size.
DefaultWriterSize = 2048
)
```
## type Reader
``` go
type Reader struct {
// contains filtered or unexported fields
}
```
Reader is a buffered look-ahead reader
### func NewReader
``` go
func NewReader(r io.Reader) *Reader
```
NewReader returns a new *Reader that reads from 'r'
### func NewReaderSize
``` go
func NewReaderSize(r io.Reader, n int) *Reader
```
NewReaderSize returns a new *Reader that
reads from 'r' and has a buffer size 'n'
### func (\*Reader) BufferSize
``` go
func (r *Reader) BufferSize() int
```
BufferSize returns the total size of the buffer
### func (\*Reader) Buffered
``` go
func (r *Reader) Buffered() int
```
Buffered returns the number of bytes currently in the buffer
### func (\*Reader) Next
``` go
func (r *Reader) Next(n int) ([]byte, error)
```
Next returns the next 'n' bytes in the stream.
Unlike Peek, Next advances the reader position.
The returned bytes point to the same
data as the buffer, so the slice is
only valid until the next reader method call.
An EOF is considered an unexpected error.
If the returned slice is less than the
length asked for, an error will be returned,
and the reader position will not be incremented.
### func (\*Reader) Peek
``` go
func (r *Reader) Peek(n int) ([]byte, error)
```
Peek returns the next 'n' buffered bytes,
reading from the underlying reader if necessary.
It will only return a slice shorter than 'n' bytes
if it also returns an error. Peek does not advance
the reader. EOF errors are *not* returned as
io.ErrUnexpectedEOF.
### func (\*Reader) Read
``` go
func (r *Reader) Read(b []byte) (int, error)
```
Read implements `io.Reader`
### func (\*Reader) ReadByte
``` go
func (r *Reader) ReadByte() (byte, error)
```
ReadByte implements `io.ByteReader`
### func (\*Reader) ReadFull
``` go
func (r *Reader) ReadFull(b []byte) (int, error)
```
ReadFull attempts to read len(b) bytes into
'b'. It returns the number of bytes read into
'b', and an error if it does not return len(b).
EOF is considered an unexpected error.
### func (\*Reader) Reset
``` go
func (r *Reader) Reset(rd io.Reader)
```
Reset resets the underlying reader
and the read buffer.
### func (\*Reader) Skip
``` go
func (r *Reader) Skip(n int) (int, error)
```
Skip moves the reader forward 'n' bytes.
Returns the number of bytes skipped and any
errors encountered. It is analogous to Seek(n, 1).
If the underlying reader implements io.Seeker, then
that method will be used to skip forward.
If the reader encounters
an EOF before skipping 'n' bytes, it
returns io.ErrUnexpectedEOF. If the
underlying reader implements io.Seeker, then
those rules apply instead. (Many implementations
will not return `io.EOF` until the next call
to Read.)
### func (\*Reader) WriteTo
``` go
func (r *Reader) WriteTo(w io.Writer) (int64, error)
```
WriteTo implements `io.WriterTo`
## type Writer
``` go
type Writer struct {
// contains filtered or unexported fields
}
```
Writer is a buffered writer
### func NewWriter
``` go
func NewWriter(w io.Writer) *Writer
```
NewWriter returns a new writer
that writes to 'w' and has a buffer
that is `DefaultWriterSize` bytes.
### func NewWriterSize
``` go
func NewWriterSize(w io.Writer, size int) *Writer
```
NewWriterSize returns a new writer
that writes to 'w' and has a buffer
that is 'size' bytes.
### func (\*Writer) BufferSize
``` go
func (w *Writer) BufferSize() int
```
BufferSize returns the maximum size of the buffer.
### func (\*Writer) Buffered
``` go
func (w *Writer) Buffered() int
```
Buffered returns the number of buffered bytes
in the writer.
### func (\*Writer) Flush
``` go
func (w *Writer) Flush() error
```
Flush flushes any buffered bytes
to the underlying writer.
### func (\*Writer) Next
``` go
func (w *Writer) Next(n int) ([]byte, error)
```
Next returns the next 'n' free bytes
in the write buffer, flushing the writer
as necessary. Next will return `io.ErrShortBuffer`
if 'n' is greater than the size of the write buffer.
Calls to 'next' increment the write position by
the size of the returned buffer.
### func (\*Writer) ReadFrom
``` go
func (w *Writer) ReadFrom(r io.Reader) (int64, error)
```
ReadFrom implements `io.ReaderFrom`
### func (\*Writer) Write
``` go
func (w *Writer) Write(p []byte) (int, error)
```
Write implements `io.Writer`
### func (\*Writer) WriteByte
``` go
func (w *Writer) WriteByte(b byte) error
```
WriteByte implements `io.ByteWriter`
### func (\*Writer) WriteString
``` go
func (w *Writer) WriteString(s string) (int, error)
```
WriteString is analogous to Write, but it takes a string.
- - -
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
// The `fwd` package provides a buffered reader
// and writer. Each has methods that help improve
// the encoding/decoding performance of some binary
// protocols.
//
// The `fwd.Writer` and `fwd.Reader` type provide similar
// functionality to their counterparts in `bufio`, plus
// a few extra utility methods that simplify read-ahead
// and write-ahead. I wrote this package to improve serialization
// performance for http://github.com/tinylib/msgp,
// where it provided about a 2x speedup over `bufio` for certain
// workloads. However, care must be taken to understand the semantics of the
// extra methods provided by this package, as they allow
// the user to access and manipulate the buffer memory
// directly.
//
// The extra methods for `fwd.Reader` are `Peek`, `Skip`
// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
// will re-allocate the read buffer in order to accommodate arbitrarily
// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
// in the stream, and uses the `io.Seeker` interface if the underlying
// stream implements it. `(*fwd.Reader).Next` returns a slice pointing
// to the next `n` bytes in the read buffer (like `Peek`), but also
// increments the read position. This allows users to process streams
// in arbitrary block sizes without having to manage appropriately-sized
// slices. Additionally, obviating the need to copy the data from the
// buffer to another location in memory can improve performance dramatically
// in CPU-bound applications.
//
// `fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which
// returns a slice pointing to the next `n` bytes of the writer, and increments
// the write position by the length of the returned slice. This allows users
// to write directly to the end of the buffer.
//
package fwd
import "io"
const (
	// DefaultReaderSize is the default size of the read buffer
	DefaultReaderSize = 2048
	// minimum read buffer; straight from bufio
	minReaderSize = 16
)
// NewReader returns a new *Reader that reads from 'r'
// with the default buffer size (DefaultReaderSize bytes).
func NewReader(r io.Reader) *Reader {
	return NewReaderSize(r, DefaultReaderSize)
}
// NewReaderSize returns a new *Reader that
// reads from 'r' and has a buffer size 'n'
// (clamped up to the minimum buffer size).
func NewReaderSize(r io.Reader, n int) *Reader {
	capacity := n
	if capacity < minReaderSize {
		capacity = minReaderSize
	}
	rd := &Reader{r: r, data: make([]byte, 0, capacity)}
	// remember the seeker, if any, so Skip can use Seek
	rd.rs, _ = r.(io.Seeker)
	return rd
}
// Reader is a buffered look-ahead reader
type Reader struct {
	r io.Reader // underlying reader
	// data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space
	data  []byte // data
	n     int    // read offset
	state error  // last read error
	// if the reader passed to NewReader was
	// also an io.Seeker, this is non-nil
	rs io.Seeker
}
// Reset resets the underlying reader
// and the read buffer. Buffered data, the read offset,
// and any sticky error are all discarded.
func (r *Reader) Reset(rd io.Reader) {
	r.r = rd
	r.data = r.data[:0]
	r.n = 0
	r.state = nil
	// a failed assertion leaves r.rs nil, exactly as before
	r.rs, _ = rd.(io.Seeker)
}
// more() does one read on the underlying reader,
// appending the bytes to the buffer and recording any
// error in r.state.
func (r *Reader) more() {
	// move data backwards so that
	// the read offset is 0; this way
	// we can supply the maximum number of
	// bytes to the reader
	if r.n != 0 {
		if r.n < len(r.data) {
			r.data = r.data[:copy(r.data[0:], r.data[r.n:])]
		} else {
			// everything already consumed; just empty the buffer
			r.data = r.data[:0]
		}
		r.n = 0
	}
	var a int
	a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)])
	if a == 0 && r.state == nil {
		// a (0, nil) read means the reader is making no progress
		r.state = io.ErrNoProgress
		return
	} else if a > 0 && r.state == io.EOF {
		// discard the io.EOF if we read more than 0 bytes.
		// the next call to Read should return io.EOF again.
		r.state = nil
	}
	// extend the buffer over the bytes just read
	r.data = r.data[:len(r.data)+a]
}
// err pops and returns the stored error state.
func (r *Reader) err() error {
	e := r.state
	r.state = nil
	return e
}
// noEOF pops the stored error state, converting
// io.EOF into io.ErrUnexpectedEOF.
func (r *Reader) noEOF() error {
	e := r.state
	r.state = nil
	if e == io.EOF {
		return io.ErrUnexpectedEOF
	}
	return e
}
// buffered reports how many unread bytes sit in the buffer.
func (r *Reader) buffered() int {
	return len(r.data) - r.n
}
// Buffered returns the number of bytes currently in the buffer.
func (r *Reader) Buffered() int {
	return len(r.data) - r.n
}
// BufferSize returns the total size of the buffer.
func (r *Reader) BufferSize() int {
	return cap(r.data)
}
// Peek returns the next 'n' buffered bytes,
// reading from the underlying reader if necessary.
// It will only return a slice shorter than 'n' bytes
// if it also returns an error. Peek does not advance
// the reader. EOF errors are *not* returned as
// io.ErrUnexpectedEOF.
//
// The returned slice aliases the internal buffer; it is
// only valid until the next call to a Reader method.
func (r *Reader) Peek(n int) ([]byte, error) {
	// in the degenerate case,
	// we may need to realloc
	// (the caller asked for more
	// bytes than the size of the buffer)
	if cap(r.data) < n {
		old := r.data[r.n:]
		r.data = make([]byte, n+r.buffered())
		r.data = r.data[:copy(r.data, old)]
		r.n = 0
	}
	// keep filling until
	// we hit an error or
	// read enough bytes
	for r.buffered() < n && r.state == nil {
		r.more()
	}
	// we must have hit an error
	if r.buffered() < n {
		return r.data[r.n:], r.err()
	}
	return r.data[r.n : r.n+n], nil
}
// Skip moves the reader forward 'n' bytes.
// Returns the number of bytes skipped and any
// errors encountered. It is analogous to Seek(n, 1).
// If the underlying reader implements io.Seeker, then
// that method will be used to skip forward.
//
// If the reader encounters
// an EOF before skipping 'n' bytes, it
// returns io.ErrUnexpectedEOF. If the
// underlying reader implements io.Seeker, then
// those rules apply instead. (Many implementations
// will not return `io.EOF` until the next call
// to Read.)
func (r *Reader) Skip(n int) (int, error) {
	// fast path: the whole skip is satisfied
	// by already-buffered data
	if r.buffered() >= n {
		r.n += n
		return n, nil
	}
	// use seeker implementation
	// if we can
	if r.rs != nil {
		return r.skipSeek(n)
	}
	// loop on filling
	// and then erasing
	o := n // remember the original request so we can report progress
	for r.buffered() < n && r.state == nil {
		r.more()
		// we can skip forward
		// up to r.buffered() bytes
		step := min(r.buffered(), n)
		r.n += step
		n -= step
	}
	// at this point, n should be
	// 0 if everything went smoothly
	return o - n, r.noEOF()
}
// Next returns the next 'n' bytes in the stream.
// Unlike Peek, Next advances the reader position.
// The returned bytes point to the same
// data as the buffer, so the slice is
// only valid until the next reader method call.
// An EOF is considered an unexpected error.
// If the returned slice is less than the
// length asked for, an error will be returned,
// and the reader position will not be incremented.
func (r *Reader) Next(n int) ([]byte, error) {
	// in case the buffer is too small
	if cap(r.data) < n {
		old := r.data[r.n:]
		r.data = make([]byte, n+r.buffered())
		r.data = r.data[:copy(r.data, old)]
		r.n = 0
	}
	// fill at least 'n' bytes
	for r.buffered() < n && r.state == nil {
		r.more()
	}
	if r.buffered() < n {
		return r.data[r.n:], r.noEOF()
	}
	out := r.data[r.n : r.n+n]
	r.n += n
	return out, nil
}
// skipSeek uses the io.Seeker to seek forward.
// Only call this function when n > r.buffered().
//
// NOTE(review): Seek reports the new absolute offset, so the
// value returned here is (new offset + bytes dropped from the
// buffer), not necessarily the number of bytes skipped — this
// matches the original behavior; confirm against callers before
// changing it.
func (r *Reader) skipSeek(n int) (int, error) {
	o := r.buffered()
	// first, drop the buffered bytes;
	// they count toward the skip
	n -= o
	r.n = 0
	r.data = r.data[:0]
	// then seek forward the remaining bytes
	// (io.SeekCurrent replaces the magic whence constant 1)
	i, err := r.rs.Seek(int64(n), io.SeekCurrent)
	return int(i) + o, err
}
// Read implements `io.Reader`
func (r *Reader) Read(b []byte) (int, error) {
	// if we have data in the buffer, just
	// return that.
	if r.buffered() != 0 {
		x := copy(b, r.data[r.n:])
		r.n += x
		return x, nil
	}
	var n int
	// we have no buffered data; determine
	// whether or not to buffer or call
	// the underlying reader directly
	if len(b) >= cap(r.data) {
		// destination is at least as large as the whole
		// buffer — bypass buffering entirely
		n, r.state = r.r.Read(b)
	} else {
		r.more()
		n = copy(b, r.data)
		r.n = n
	}
	if n == 0 {
		return 0, r.err()
	}
	return n, nil
}
// ReadFull attempts to read len(b) bytes into
// 'b'. It returns the number of bytes read into
// 'b', and an error if it does not return len(b).
// EOF is considered an unexpected error.
func (r *Reader) ReadFull(b []byte) (int, error) {
	var n int  // read into b
	var nn int // scratch
	l := len(b)
	// either read buffered data,
	// read directly from the underlying
	// reader, or fetch more buffered data.
	for n < l && r.state == nil {
		if r.buffered() != 0 {
			nn = copy(b[n:], r.data[r.n:])
			n += nn
			r.n += nn
		} else if l-n > cap(r.data) {
			// the remaining request is larger than the whole
			// buffer; read straight into b
			nn, r.state = r.r.Read(b[n:])
			n += nn
		} else {
			r.more()
		}
	}
	if n < l {
		return n, r.noEOF()
	}
	return n, nil
}
// ReadByte implements `io.ByteReader`
func (r *Reader) ReadByte() (byte, error) {
	// pull data until at least one byte
	// is available or an error occurs
	for r.state == nil && r.buffered() < 1 {
		r.more()
	}
	if r.buffered() < 1 {
		return 0, r.err()
	}
	c := r.data[r.n]
	r.n++
	return c, nil
}
// WriteTo implements `io.WriterTo`; it copies everything
// remaining in the stream to 'w', returning the number of
// bytes written and the first non-EOF error encountered.
func (r *Reader) WriteTo(w io.Writer) (int64, error) {
	var (
		i   int64 // total bytes written
		ii  int   // bytes written by the last call
		err error
	)
	// first, clear buffer
	if r.buffered() > 0 {
		ii, err = w.Write(r.data[r.n:])
		i += int64(ii)
		if err != nil {
			return i, err
		}
		r.data = r.data[0:0]
		r.n = 0
	}
	for r.state == nil {
		// here we just do
		// 1:1 reads and writes
		r.more()
		if r.buffered() > 0 {
			ii, err = w.Write(r.data)
			i += int64(ii)
			if err != nil {
				return i, err
			}
			r.data = r.data[0:0]
			r.n = 0
		}
	}
	// io.EOF is the expected terminal state; anything else is an error
	if r.state != io.EOF {
		return i, r.err()
	}
	return i, nil
}
// min returns the smaller of a and b.
func min(a int, b int) int {
	if a > b {
		return b
	}
	return a
}
// max returns the larger of a and b.
func max(a int, b int) int {
	if a > b {
		return a
	}
	return b
}
package fwd
import "io"
const (
	// DefaultWriterSize is the
	// default write buffer size.
	DefaultWriterSize = 2048

	// minWriterSize is the minimum write buffer size,
	// shared with the reader's minimum.
	minWriterSize = minReaderSize
)
// Writer is a buffered writer
type Writer struct {
	w   io.Writer // underlying writer
	buf []byte    // buf[0:len(buf)] is buffered data
}
// NewWriter returns a new writer
// that writes to 'w' and has a buffer
// that is `DefaultWriterSize` bytes.
func NewWriter(w io.Writer) *Writer {
	// avoid double-buffering another *Writer
	wr, ok := w.(*Writer)
	if ok {
		return wr
	}
	return &Writer{
		w:   w,
		buf: make([]byte, 0, DefaultWriterSize),
	}
}
// NewWriterSize returns a new writer
// that writes to 'w' and has a buffer
// that is 'size' bytes.
func NewWriterSize(w io.Writer, size int) *Writer {
	// reuse an existing *Writer when its buffer
	// is already at least as large as requested
	if wr, ok := w.(*Writer); ok && cap(wr.buf) >= size {
		return wr
	}
	bufSize := size
	if bufSize < minWriterSize {
		bufSize = minWriterSize
	}
	return &Writer{
		w:   w,
		buf: make([]byte, 0, bufSize),
	}
}
// Buffered returns the number of buffered bytes
// in the writer.
func (w *Writer) Buffered() int { return len(w.buf) }
// BufferSize returns the maximum size of the buffer.
func (w *Writer) BufferSize() int {
	return cap(w.buf)
}
// Flush flushes any buffered bytes
// to the underlying writer.
func (w *Writer) Flush() error {
	l := len(w.buf)
	if l == 0 {
		return nil
	}
	n, err := w.w.Write(w.buf)
	// if we didn't write the whole
	// thing, copy the unwritten
	// bytes to the beginning of the
	// buffer.
	if n < l && n > 0 {
		w.pushback(n)
		if err == nil {
			err = io.ErrShortWrite
		}
	}
	if err != nil {
		return err
	}
	w.buf = w.buf[:0]
	return nil
}
// Write implements `io.Writer`
func (w *Writer) Write(p []byte) (int, error) {
	c, l, ln := cap(w.buf), len(w.buf), len(p)
	avail := c - l
	// requires flush to make room
	if avail < ln {
		if err := w.Flush(); err != nil {
			return 0, err
		}
		l = len(w.buf)
	}
	// too big to fit in buffer;
	// write directly to w.w
	if c < ln {
		return w.w.Write(p)
	}
	// grow buf slice; copy; return
	w.buf = w.buf[:l+ln]
	return copy(w.buf[l:], p), nil
}
// WriteString is analogous to Write, but it takes a string.
func (w *Writer) WriteString(s string) (int, error) {
	c, l, ln := cap(w.buf), len(w.buf), len(s)
	avail := c - l
	// requires flush to make room
	if avail < ln {
		if err := w.Flush(); err != nil {
			return 0, err
		}
		l = len(w.buf)
	}
	// too big to fit in buffer;
	// write directly to w.w
	//
	// yes, this is unsafe. *but*
	// io.Writer is not allowed
	// to mutate its input or
	// maintain a reference to it,
	// per the spec in package io.
	//
	// plus, if the string is really
	// too big to fit in the buffer, then
	// creating a copy to write it is
	// expensive (and, strictly speaking,
	// unnecessary)
	if c < ln {
		return w.w.Write(unsafestr(s))
	}
	// grow buf slice; copy; return
	w.buf = w.buf[:l+ln]
	return copy(w.buf[l:], s), nil
}
// WriteByte implements `io.ByteWriter`
func (w *Writer) WriteByte(b byte) error {
	// no free space left: flush first
	if cap(w.buf)-len(w.buf) == 0 {
		if err := w.Flush(); err != nil {
			return err
		}
	}
	w.buf = append(w.buf, b)
	return nil
}
// Next returns the next 'n' free bytes
// in the write buffer, flushing the writer
// as necessary. Next will return `io.ErrShortBuffer`
// if 'n' is greater than the size of the write buffer.
// Calls to 'next' increment the write position by
// the size of the returned buffer.
func (w *Writer) Next(n int) ([]byte, error) {
	c, l := cap(w.buf), len(w.buf)
	// the request can never fit
	if n > c {
		return nil, io.ErrShortBuffer
	}
	avail := c - l
	if avail < n {
		if err := w.Flush(); err != nil {
			return nil, err
		}
		l = len(w.buf)
	}
	w.buf = w.buf[:l+n]
	return w.buf[l:], nil
}
// pushback takes the bytes from w.buf[n:len(w.buf)],
// puts them at the beginning of w.buf,
// and resizes the buffer to the length of the copied segment.
func (w *Writer) pushback(n int) {
	w.buf = w.buf[:copy(w.buf, w.buf[n:])]
}
// ReadFrom implements `io.ReaderFrom`: it streams data from
// 'r' to the underlying writer with 1:1 reads and writes,
// using the write buffer as scratch space. It returns the
// total bytes written and the first error encountered
// (io.EOF from the reader is treated as success).
//
// Fixes over the original:
//   - a write error was detected via the *read* error variable,
//     so a read error after a fully-successful write returned a
//     nil error (and real read errors could be swallowed);
//   - pushback(n-x) was called with a negative argument when a
//     write came up short, slicing with a negative index (panic);
//   - on the non-EOF exit path the buffer was left at full
//     capacity containing stale scratch data, which a later
//     Flush would write out.
func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
	// anticipatory flush so the entire buffer is scratch space
	if err := w.Flush(); err != nil {
		return 0, err
	}
	w.buf = w.buf[0:cap(w.buf)] // expand buffer

	var nn int64  // total bytes written
	var err error // read error
	var x int     // bytes read this iteration

	// 1:1 reads and writes
	for err == nil {
		x, err = r.Read(w.buf)
		if x > 0 {
			n, werr := w.w.Write(w.buf[:x])
			nn += int64(n)
			if werr == nil && n < x {
				werr = io.ErrShortWrite
			}
			if werr != nil {
				// keep the unwritten tail at the front of the
				// buffer so a future Flush can retry it
				w.buf = w.buf[:copy(w.buf, w.buf[n:x])]
				return nn, werr
			}
		} else if err == nil {
			// (0, nil) reads make no progress
			err = io.ErrNoProgress
			break
		}
	}
	// every byte read so far has been written successfully,
	// so the scratch space holds nothing meaningful — always
	// reset its length before returning
	w.buf = w.buf[0:0]
	if err != io.EOF {
		return nn, err
	}
	return nn, nil
}
// +build appengine
package fwd
func unsafestr(s string) []byte { return []byte(s) }
// +build !appengine
package fwd
import (
"reflect"
"unsafe"
)
// unsafestr casts a string to a []byte without copying.
//
// The returned slice aliases the string's backing memory and
// must never be written to; in this package it is only passed
// to io.Writer implementations (see WriteString), which per the
// io package contract must not mutate or retain their input.
func unsafestr(b string) []byte {
	l := len(b)
	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
		Len:  l,
		Cap:  l,
		Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data,
	}))
}
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
.DS_Store
\ No newline at end of file
language: go
services:
- docker
go:
- "1.9.x"
- "1.10.x"
- "1.11.x"
- "1.12.x"
- tip
before_install:
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
- docker pull i386/golang:1.12-alpine
- docker run -dit --name test i386/golang:1.12-alpine sh
- docker exec test sh -c "apk add --no-cache git"
- docker exec test sh -c "go get github.com/shamaton/msgpack"
- docker exec test sh -c "cd /go/src/github.com/shamaton/msgpack && git checkout `git rev-parse --abbrev-ref HEAD`"
script:
- docker exec test sh -c "cd /go/src/github.com/shamaton/msgpack && go test -v ."
- $GOPATH/bin/goveralls -service=travis-ci
MIT License
Copyright (c) 2018 Masayuki Shamoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# MessagePack for Golang
[![GoDoc](https://godoc.org/github.com/shamaton/msgpack?status.svg)](https://godoc.org/github.com/shamaton/msgpack)
[![Build Status](https://travis-ci.org/shamaton/msgpack.svg?branch=master)](https://travis-ci.org/shamaton/msgpack)
[![Coverage Status](https://coveralls.io/repos/github/shamaton/msgpack/badge.svg)](https://coveralls.io/github/shamaton/msgpack)
[![Releases](https://img.shields.io/github/release/shamaton/msgpack.svg)](https://github.com/shamaton/msgpack/releases)
* Supported types : primitive / array / slice / struct / map / interface{} and time.Time
* Renames fields via `msgpack:"field_name"`
* Ignores fields via `msgpack:"ignore"`
* Supports extend encoder / decoder
* Can also encode / decode a struct as an array
This package requires golang version **1.9** or later
## Installation
```sh
go get -u github.com/shamaton/msgpack
```
## Quick Start
```go
package main
import (
"github.com/shamaton/msgpack"
)
func main() {
type Struct struct {
String string
}
v := Struct{String: "msgpack"}
d, err := msgpack.Encode(v)
if err != nil {
panic(err)
}
r := Struct{}
err = msgpack.Decode(d, &r)
if err != nil {
panic(err)
}
}
```
## Benchmark
This result made from [shamaton/msgpack_bench](https://github.com/shamaton/msgpack_bench)
### Encode
```
BenchmarkCompareEncodeShamaton-4 1000000 1341 ns/op 320 B/op 3 allocs/op
BenchmarkCompareEncodeShamatonArray-4 1000000 1183 ns/op 256 B/op 3 allocs/op
BenchmarkCompareEncodeVmihailenco-4 200000 5271 ns/op 968 B/op 14 allocs/op
BenchmarkCompareEncodeVmihailencoArray-4 300000 5055 ns/op 968 B/op 14 allocs/op
BenchmarkCompareEncodeUgorji-4 1000000 1772 ns/op 872 B/op 10 allocs/op
BenchmarkCompareEncodeZeroformatter-4 1000000 1960 ns/op 744 B/op 13 allocs/op
BenchmarkCompareEncodeJson-4 300000 3679 ns/op 1224 B/op 16 allocs/op
BenchmarkCompareEncodeGob-4 100000 11988 ns/op 2824 B/op 50 allocs/op
```
### Decode
```
BenchmarkCompareDecodeShamaton-4 1000000 1501 ns/op 512 B/op 6 allocs/op
BenchmarkCompareDecodeShamatonArray-4 1000000 1032 ns/op 512 B/op 6 allocs/op
BenchmarkCompareDecodeVmihailenco-4 200000 5573 ns/op 1056 B/op 33 allocs/op
BenchmarkCompareDecodeVmihailencoArray-4 300000 4438 ns/op 992 B/op 22 allocs/op
BenchmarkCompareDecodeUgorji-4 500000 2615 ns/op 858 B/op 11 allocs/op
BenchmarkCompareDecodeJson-4 200000 9241 ns/op 1216 B/op 43 allocs/op
BenchmarkCompareDecodeGob-4 50000 37985 ns/op 10172 B/op 275 allocs/op
```
## License
This library is under the MIT License.
package msgpack
import "github.com/shamaton/msgpack/internal/decoding"
// DecodeStructAsMap decodes data that is encoded as map format.
// It behaves as if the StructAsArray option were set to false.
func DecodeStructAsMap(data []byte, v interface{}) error {
	return decoding.Decode(data, v, false)
}
// DecodeStructAsArray decodes data that is encoded as array format.
// It behaves as if the StructAsArray option were set to true.
func DecodeStructAsArray(data []byte, v interface{}) error {
	return decoding.Decode(data, v, true)
}
package def
// IntSize is the bit width of the platform's int type: 32 or 64.
const IntSize = 32 << (^uint(0) >> 63)

// IsIntSize32 reports whether int is 32 bits wide on this platform.
var IsIntSize32 = IntSize == 32

// message pack format codes
const (
	PositiveFixIntMin = 0x00
	PositiveFixIntMax = 0x7f
	FixMap            = 0x80
	FixArray          = 0x90
	FixStr            = 0xa0
	Nil               = 0xc0
	False             = 0xc2
	True              = 0xc3
	Bin8              = 0xc4
	Bin16             = 0xc5
	Bin32             = 0xc6
	Ext8              = 0xc7
	Ext16             = 0xc8
	Ext32             = 0xc9
	Float32           = 0xca
	Float64           = 0xcb
	Uint8             = 0xcc
	Uint16            = 0xcd
	Uint32            = 0xce
	Uint64            = 0xcf
	Int8              = 0xd0
	Int16             = 0xd1
	Int32             = 0xd2
	Int64             = 0xd3
	Fixext1           = 0xd4
	Fixext2           = 0xd5
	Fixext4           = 0xd6
	Fixext8           = 0xd7
	Fixext16          = 0xd8
	Str8              = 0xd9
	Str16             = 0xda
	Str32             = 0xdb
	Array16           = 0xdc
	Array32           = 0xdd
	Map16             = 0xde
	Map32             = 0xdf
	NegativeFixintMin = -32 // 0xe0
	NegativeFixintMax = -1  // 0xff
)

// byte widths used for fixed-size reads and writes
const (
	Byte1 = 1 << iota
	Byte2
	Byte4
	Byte8
	Byte16
	Byte32
)

// ext type codes
const (
	// TimeStamp is the reserved msgpack extension code for timestamps.
	TimeStamp = -1
)
package msgpack
import (
"github.com/shamaton/msgpack/internal/encoding"
)
// EncodeStructAsMap encodes data as map format.
// It behaves as if the StructAsArray option were set to false.
func EncodeStructAsMap(v interface{}) ([]byte, error) {
	return encoding.Encode(v, false)
}
// EncodeStructAsArray encodes data as array format.
// It behaves as if the StructAsArray option were set to true.
func EncodeStructAsArray(v interface{}) ([]byte, error) {
	return encoding.Encode(v, true)
}
package ext
import (
"reflect"
"github.com/shamaton/msgpack/def"
)
// Decoder is implemented by extension-type decoders that can be
// registered with the decoding package.
type Decoder interface {
	// Code returns the msgpack extension code this decoder handles.
	Code() int8
	// IsType reports whether the data at 'offset' in *d is this extension type.
	IsType(offset int, d *[]byte) bool
	// AsValue decodes the value at 'offset', returning the decoded value
	// and the offset just past the consumed bytes.
	AsValue(offset int, k reflect.Kind, d *[]byte) (interface{}, int, error)
}

// DecoderCommon provides fixed-size read helpers shared by
// Decoder implementations.
type DecoderCommon struct {
}
// ReadSize1 returns the byte at 'index' and the offset just past it.
func (cd *DecoderCommon) ReadSize1(index int, d *[]byte) (byte, int) {
	next := index + def.Byte1
	return (*d)[index], next
}

// ReadSize2 returns the 2 bytes at 'index' and the offset just past them.
func (cd *DecoderCommon) ReadSize2(index int, d *[]byte) ([]byte, int) {
	next := index + def.Byte2
	return (*d)[index:next], next
}

// ReadSize4 returns the 4 bytes at 'index' and the offset just past them.
func (cd *DecoderCommon) ReadSize4(index int, d *[]byte) ([]byte, int) {
	next := index + def.Byte4
	return (*d)[index:next], next
}

// ReadSize8 returns the 8 bytes at 'index' and the offset just past them.
func (cd *DecoderCommon) ReadSize8(index int, d *[]byte) ([]byte, int) {
	next := index + def.Byte8
	return (*d)[index:next], next
}

// ReadSizeN returns 'n' bytes at 'index' and the offset just past them.
func (cd *DecoderCommon) ReadSizeN(index, n int, d *[]byte) ([]byte, int) {
	next := index + n
	return (*d)[index:next], next
}
package ext
import (
"reflect"
)
// Encoder is implemented by extension-type encoders that can be
// registered with the encoding package.
type Encoder interface {
	// Code returns the msgpack extension code this encoder emits.
	Code() int8
	// Type returns the Go type this encoder handles.
	Type() reflect.Type
	// CalcByteSize returns the encoded size of 'value' in bytes.
	CalcByteSize(value reflect.Value) (int, error)
	// WriteToBytes serializes 'value' into *bytes at 'offset' and
	// returns the offset just past the written data.
	WriteToBytes(value reflect.Value, offset int, bytes *[]byte) int
}

// EncoderCommon provides big-endian write helpers shared by
// Encoder implementations.
type EncoderCommon struct {
}
// SetByte1Int64 writes the lowest byte of 'value' at 'offset'
// and returns the next free offset.
func (c *EncoderCommon) SetByte1Int64(value int64, offset int, d *[]byte) int {
	(*d)[offset] = byte(value)
	return offset + 1
}

// SetByte2Int64 writes the low 2 bytes of 'value' big-endian at 'offset'.
func (c *EncoderCommon) SetByte2Int64(value int64, offset int, d *[]byte) int {
	for i := 0; i < 2; i++ {
		(*d)[offset+i] = byte(value >> uint(8*(1-i)))
	}
	return offset + 2
}

// SetByte4Int64 writes the low 4 bytes of 'value' big-endian at 'offset'.
func (c *EncoderCommon) SetByte4Int64(value int64, offset int, d *[]byte) int {
	for i := 0; i < 4; i++ {
		(*d)[offset+i] = byte(value >> uint(8*(3-i)))
	}
	return offset + 4
}

// SetByte8Int64 writes all 8 bytes of 'value' big-endian at 'offset'.
func (c *EncoderCommon) SetByte8Int64(value int64, offset int, d *[]byte) int {
	for i := 0; i < 8; i++ {
		(*d)[offset+i] = byte(value >> uint(8*(7-i)))
	}
	return offset + 8
}
// SetByte1Uint64 writes the lowest byte of 'value' at 'offset'
// and returns the next free offset.
func (c *EncoderCommon) SetByte1Uint64(value uint64, offset int, d *[]byte) int {
	(*d)[offset] = byte(value)
	return offset + 1
}

// SetByte2Uint64 writes the low 2 bytes of 'value' big-endian at 'offset'.
func (c *EncoderCommon) SetByte2Uint64(value uint64, offset int, d *[]byte) int {
	for i := 0; i < 2; i++ {
		(*d)[offset+i] = byte(value >> uint(8*(1-i)))
	}
	return offset + 2
}

// SetByte4Uint64 writes the low 4 bytes of 'value' big-endian at 'offset'.
func (c *EncoderCommon) SetByte4Uint64(value uint64, offset int, d *[]byte) int {
	for i := 0; i < 4; i++ {
		(*d)[offset+i] = byte(value >> uint(8*(3-i)))
	}
	return offset + 4
}

// SetByte8Uint64 writes all 8 bytes of 'value' big-endian at 'offset'.
func (c *EncoderCommon) SetByte8Uint64(value uint64, offset int, d *[]byte) int {
	for i := 0; i < 8; i++ {
		(*d)[offset+i] = byte(value >> uint(8*(7-i)))
	}
	return offset + 8
}
// SetByte1Int writes the low byte of 'code' at 'offset'
// and returns the next free offset.
func (c *EncoderCommon) SetByte1Int(code, offset int, d *[]byte) int {
	(*d)[offset] = byte(code)
	return offset + 1
}

// SetByte2Int writes the low 2 bytes of 'value' big-endian at 'offset'.
func (c *EncoderCommon) SetByte2Int(value int, offset int, d *[]byte) int {
	for i := 0; i < 2; i++ {
		(*d)[offset+i] = byte(value >> uint(8*(1-i)))
	}
	return offset + 2
}

// SetByte4Int writes the low 4 bytes of 'value' big-endian at 'offset'.
func (c *EncoderCommon) SetByte4Int(value int, offset int, d *[]byte) int {
	for i := 0; i < 4; i++ {
		(*d)[offset+i] = byte(value >> uint(8*(3-i)))
	}
	return offset + 4
}

// SetByte4Uint32 writes 'value' as 4 big-endian bytes at 'offset'.
func (c *EncoderCommon) SetByte4Uint32(value uint32, offset int, d *[]byte) int {
	for i := 0; i < 4; i++ {
		(*d)[offset+i] = byte(value >> uint(8*(3-i)))
	}
	return offset + 4
}
// SetBytes copies 'bs' into *d starting at 'offset' and returns
// the offset just past the copied bytes. The destination must
// already be large enough; like the original element-wise loop,
// this panics if it is not (the explicit upper bound preserves
// that behavior — a bare copy would silently truncate instead).
func (c *EncoderCommon) SetBytes(bs []byte, offset int, d *[]byte) int {
	copy((*d)[offset:offset+len(bs)], bs)
	return offset + len(bs)
}
package common
import "reflect"
// Common holds helpers shared by the encoding and decoding packages.
type Common struct {
}
// CheckField reports whether the struct field should be
// encoded/decoded, and under which name. The `msgpack` tag can
// rename a field, and the value "ignore" excludes it entirely.
func (c *Common) CheckField(field reflect.StructField) (bool, string) {
	// unexported (non A-Z) fields are never serialized
	if !c.isPublic(field.Name) {
		return false, ""
	}
	tag := field.Tag.Get("msgpack")
	switch {
	case tag == "ignore":
		return false, ""
	case len(tag) > 0:
		return true, tag
	}
	return true, field.Name
}
// isPublic reports whether 'name' starts with an ASCII uppercase
// letter, i.e. is an exported identifier. Character literals
// replace the original 0x41/0x5a magic numbers, and an empty
// name now returns false instead of panicking on name[0].
func (c *Common) isPublic(name string) bool {
	return len(name) > 0 && 'A' <= name[0] && name[0] <= 'Z'
}
package decoding
import (
"encoding/binary"
"reflect"
"unsafe"
"github.com/shamaton/msgpack/def"
)
// isCodeBin reports whether 'v' is one of the msgpack bin-family codes.
func (d *decoder) isCodeBin(v byte) bool {
	return v == def.Bin8 || v == def.Bin16 || v == def.Bin32
}
// asBin decodes a bin8/bin16/bin32 value at 'offset', returning
// the payload bytes (aliasing d.data) and the offset just past them.
func (d *decoder) asBin(offset int, k reflect.Kind) ([]byte, int, error) {
	code, offset := d.readSize1(offset)
	switch code {
	case def.Bin8:
		// 1-byte length prefix
		l, offset := d.readSize1(offset)
		o := offset + int(uint8(l))
		return d.data[offset:o], o, nil
	case def.Bin16:
		// 2-byte big-endian length prefix
		bs, offset := d.readSize2(offset)
		o := offset + int(binary.BigEndian.Uint16(bs))
		return d.data[offset:o], o, nil
	case def.Bin32:
		// 4-byte big-endian length prefix
		bs, offset := d.readSize4(offset)
		o := offset + int(binary.BigEndian.Uint32(bs))
		return d.data[offset:o], o, nil
	}
	return emptyBytes, 0, d.errorTemplate(code, k)
}
// asBinString decodes a bin value and reinterprets its bytes as a
// string without copying.
// NOTE(review): the returned string aliases d.data; this assumes
// the input buffer is not mutated after decoding — confirm callers.
func (d *decoder) asBinString(offset int, k reflect.Kind) (string, int, error) {
	bs, offset, err := d.asBin(offset, k)
	return *(*string)(unsafe.Pointer(&bs)), offset, err
}
package decoding
import (
"reflect"
"github.com/shamaton/msgpack/def"
)
// asBool decodes the value at 'offset' as a bool. Only the
// True and False codes are accepted.
func (d *decoder) asBool(offset int, k reflect.Kind) (bool, int, error) {
	code := d.data[offset]
	offset++
	if code == def.True {
		return true, offset, nil
	}
	if code == def.False {
		return false, offset, nil
	}
	return false, 0, d.errorTemplate(code, k)
}
package decoding
import (
"fmt"
"reflect"
"github.com/shamaton/msgpack/internal/common"
)
// decoder holds the state of one Decode call.
type decoder struct {
	data    []byte // the full msgpack input
	asArray bool   // decode structs from array format instead of map format
	common.Common
}
// Decode analyzes the MessagePack-encoded data and stores
// the result into the pointer of v. It returns an error if v
// is not a pointer or if the input is not fully consumed.
func Decode(data []byte, v interface{}, asArray bool) error {
	d := decoder{data: data, asArray: asArray}
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr {
		// %T prints the dynamic type; the original used %t, the
		// *boolean* verb, which renders as %!t(...) for non-bools.
		return fmt.Errorf("holder must set pointer value. but got: %T", v)
	}
	rv = rv.Elem()
	last, err := d.decode(rv, 0)
	if err != nil {
		return err
	}
	// the whole input must have been consumed
	if len(data) != last {
		return fmt.Errorf("failed deserialization size=%d, last=%d", len(data), last)
	}
	return err
}
// decode reads the msgpack value at 'offset' into rv and returns
// the offset just past the consumed bytes. It dispatches on rv's
// reflect.Kind and recurses for composite types (slices, arrays,
// maps, structs, pointers, interfaces).
func (d *decoder) decode(rv reflect.Value, offset int) (int, error) {
	k := rv.Kind()
	switch k {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		v, o, err := d.asInt(offset, k)
		if err != nil {
			return 0, err
		}
		rv.SetInt(v)
		offset = o
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		v, o, err := d.asUint(offset, k)
		if err != nil {
			return 0, err
		}
		rv.SetUint(v)
		offset = o
	case reflect.Float32:
		v, o, err := d.asFloat32(offset, k)
		if err != nil {
			return 0, err
		}
		rv.SetFloat(float64(v))
		offset = o
	case reflect.Float64:
		v, o, err := d.asFloat64(offset, k)
		if err != nil {
			return 0, err
		}
		rv.SetFloat(v)
		offset = o
	case reflect.String:
		// byte slice encoded as bin, decoded into a string
		if d.isCodeBin(d.data[offset]) {
			v, offset, err := d.asBinString(offset, k)
			if err != nil {
				return 0, err
			}
			rv.SetString(v)
			return offset, nil
		}
		v, o, err := d.asString(offset, k)
		if err != nil {
			return 0, err
		}
		rv.SetString(v)
		offset = o
	case reflect.Bool:
		v, o, err := d.asBool(offset, k)
		if err != nil {
			return 0, err
		}
		rv.SetBool(v)
		offset = o
	case reflect.Slice:
		// nil leaves the slice untouched
		if d.isCodeNil(d.data[offset]) {
			offset++
			return offset, nil
		}
		// byte slice
		if d.isCodeBin(d.data[offset]) {
			bs, offset, err := d.asBin(offset, k)
			if err != nil {
				return 0, err
			}
			rv.SetBytes(bs)
			return offset, nil
		}
		// string to bytes
		if d.isCodeString(d.data[offset]) {
			l, offset, err := d.stringByteLength(offset, k)
			if err != nil {
				return 0, err
			}
			bs, offset := d.asStringByte(offset, l, k)
			rv.SetBytes(bs)
			return offset, nil
		}
		// get slice length
		l, o, err := d.sliceLength(offset, k)
		if err != nil {
			return 0, err
		}
		// check fixed type (fast paths for common element types)
		fixedOffset, found, err := d.asFixedSlice(rv, o, l)
		if err != nil {
			return 0, err
		}
		if found {
			return fixedOffset, nil
		}
		// create slice dynamically and decode each element
		tmpSlice := reflect.MakeSlice(rv.Type(), l, l)
		for i := 0; i < l; i++ {
			v := tmpSlice.Index(i)
			if v.Kind() == reflect.Struct {
				o, err = d.setStruct(v, o, k)
			} else {
				o, err = d.decode(v, o)
			}
			if err != nil {
				return 0, err
			}
		}
		rv.Set(tmpSlice)
		offset = o
	case reflect.Array:
		// nil leaves the array untouched
		if d.isCodeNil(d.data[offset]) {
			offset++
			return offset, nil
		}
		// byte slice into a fixed-size array
		if d.isCodeBin(d.data[offset]) {
			bs, offset, err := d.asBin(offset, k)
			if err != nil {
				return 0, err
			}
			if len(bs) > rv.Len() {
				return 0, fmt.Errorf("%v len is %d, but msgpack has %d elements", rv.Type(), rv.Len(), len(bs))
			}
			for i, b := range bs {
				rv.Index(i).SetUint(uint64(b))
			}
			return offset, nil
		}
		// string to bytes
		if d.isCodeString(d.data[offset]) {
			l, offset, err := d.stringByteLength(offset, k)
			if err != nil {
				return 0, err
			}
			if l > rv.Len() {
				return 0, fmt.Errorf("%v len is %d, but msgpack has %d elements", rv.Type(), rv.Len(), l)
			}
			bs, offset := d.asStringByte(offset, l, k)
			for i, b := range bs {
				rv.Index(i).SetUint(uint64(b))
			}
			return offset, nil
		}
		// get slice length
		l, o, err := d.sliceLength(offset, k)
		if err != nil {
			return 0, err
		}
		// the encoded data must fit the array
		if l > rv.Len() {
			return 0, fmt.Errorf("%v len is %d, but msgpack has %d elements", rv.Type(), rv.Len(), l)
		}
		// decode each element in place
		for i := 0; i < l; i++ {
			o, err = d.decode(rv.Index(i), o)
			if err != nil {
				return 0, err
			}
		}
		offset = o
	case reflect.Map:
		// nil leaves the map untouched
		if d.isCodeNil(d.data[offset]) {
			offset++
			return offset, nil
		}
		// get map length
		l, o, err := d.mapLength(offset, k)
		if err != nil {
			return 0, err
		}
		// check fixed type (fast paths for common key/value types)
		fixedOffset, found, err := d.asFixedMap(rv, o, l)
		if err != nil {
			return 0, err
		}
		if found {
			return fixedOffset, nil
		}
		// create dynamically, decoding each key/value pair
		key := rv.Type().Key()
		value := rv.Type().Elem()
		if rv.IsNil() {
			rv.Set(reflect.MakeMapWithSize(rv.Type(), l))
		}
		for i := 0; i < l; i++ {
			k := reflect.New(key).Elem()
			v := reflect.New(value).Elem()
			o, err = d.decode(k, o)
			if err != nil {
				return 0, err
			}
			o, err = d.decode(v, o)
			if err != nil {
				return 0, err
			}
			rv.SetMapIndex(k, v)
		}
		offset = o
	case reflect.Struct:
		o, err := d.setStruct(rv, offset, k)
		if err != nil {
			return 0, err
		}
		offset = o
	case reflect.Ptr:
		// nil leaves the pointer untouched
		if d.isCodeNil(d.data[offset]) {
			offset++
			return offset, nil
		}
		// allocate the pointee if the pointer is nil
		if rv.Elem().Kind() == reflect.Invalid {
			n := reflect.New(rv.Type().Elem())
			rv.Set(n)
		}
		o, err := d.decode(rv.Elem(), offset)
		if err != nil {
			return 0, err
		}
		offset = o
	case reflect.Interface:
		if rv.Elem().Kind() == reflect.Ptr {
			// decode through the pointer the interface already holds
			o, err := d.decode(rv.Elem(), offset)
			if err != nil {
				return 0, err
			}
			offset = o
		} else {
			// decode into a fresh dynamically-typed value
			v, o, err := d.asInterface(offset, k)
			if err != nil {
				return 0, err
			}
			if v != nil {
				rv.Set(reflect.ValueOf(v))
			}
			offset = o
		}
	default:
		return 0, fmt.Errorf("type(%v) is unsupported", rv.Kind())
	}
	return offset, nil
}
// errorTemplate builds the standard "invalid code" decode error for
// a msgpack code byte encountered while decoding kind k.
func (d *decoder) errorTemplate(code byte, k reflect.Kind) error {
	return fmt.Errorf("msgpack : invalid code %x decoding %v", code, k)
}
package decoding
import (
"github.com/shamaton/msgpack/ext"
"github.com/shamaton/msgpack/time"
)
// extCoderMap maps extension codes to their registered decoders;
// the built-in time decoder is always present.
var extCoderMap = map[int8]ext.Decoder{time.Decoder.Code(): time.Decoder}

// extCoders is the flat list kept in sync with extCoderMap
// (see updateExtCoders).
var extCoders = []ext.Decoder{time.Decoder}
// AddExtDecoder adds decoders for extension types.
// Registering a decoder with the built-in time code is a no-op,
// as is re-registering an already-known code.
func AddExtDecoder(f ext.Decoder) {
	// ignore time
	if f.Code() == time.Decoder.Code() {
		return
	}
	if _, ok := extCoderMap[f.Code()]; !ok {
		extCoderMap[f.Code()] = f
		updateExtCoders()
	}
}
// RemoveExtDecoder removes decoders for extension types.
// The built-in time decoder can not be removed; unknown codes
// are a no-op.
func RemoveExtDecoder(f ext.Decoder) {
	// ignore time
	if f.Code() == time.Decoder.Code() {
		return
	}
	if _, ok := extCoderMap[f.Code()]; ok {
		delete(extCoderMap, f.Code())
		updateExtCoders()
	}
}
// updateExtCoders rebuilds the extCoders slice from extCoderMap.
// Ranging over map values directly replaces the original
// key-range plus second map lookup per entry.
func updateExtCoders() {
	extCoders = make([]ext.Decoder, 0, len(extCoderMap))
	for _, dec := range extCoderMap {
		extCoders = append(extCoders, dec)
	}
}
/*
var zero = time.Unix(0,0)
func (d *decoder) isDateTime(offset int) bool {
code, offset := d.readSize1(offset)
if code == def.Fixext4 {
t, _ := d.readSize1(offset)
return int8(t) == def.TimeStamp
} else if code == def.Fixext8 {
t, _ := d.readSize1(offset)
return int8(t) == def.TimeStamp
} else if code == def.Ext8 {
l, offset := d.readSize1(offset)
t, _ := d.readSize1(offset)
return l == 12 && int8(t) == def.TimeStamp
}
return false
}
func (d *decoder) asDateTime(offset int, k reflect.Kind) (time.Time, int, error) {
code, offset := d.readSize1(offset)
switch code {
case def.Fixext4:
_, offset = d.readSize1(offset)
bs, offset := d.readSize4(offset)
return time.Unix(int64(binary.BigEndian.Uint32(bs)), 0), offset, nil
case def.Fixext8:
_, offset = d.readSize1(offset)
bs, offset := d.readSize8(offset)
data64 := binary.BigEndian.Uint64(bs)
nano := int64(data64 >> 34)
if nano > 999999999 {
return zero, 0, fmt.Errorf("In timestamp 64 formats, nanoseconds must not be larger than 999999999 : %d", nano)
}
return time.Unix(int64(data64&0x00000003ffffffff), nano), offset, nil
case def.Ext8:
_, offset = d.readSize1(offset)
_, offset = d.readSize1(offset)
nanobs, offset := d.readSize4(offset)
secbs, offset := d.readSize8(offset)
nano := binary.BigEndian.Uint32(nanobs)
if nano > 999999999 {
return zero, 0, fmt.Errorf("In timestamp 96 formats, nanoseconds must not be larger than 999999999 : %d", nano)
}
sec := binary.BigEndian.Uint64(secbs)
return time.Unix(int64(sec), int64(nano)), offset, nil
}
return zero, 0, d.errorTemplate(code, k)
}
*/
package decoding
import (
"encoding/binary"
"math"
"reflect"
"github.com/shamaton/msgpack/def"
)
// asFloat32 decodes the value at 'offset' as a float32. Integer
// codes are accepted and converted; Nil decodes to 0.
func (d *decoder) asFloat32(offset int, k reflect.Kind) (float32, int, error) {
	code := d.data[offset]
	switch {
	case code == def.Float32:
		offset++
		bs, offset := d.readSize4(offset)
		v := math.Float32frombits(binary.BigEndian.Uint32(bs))
		return v, offset, nil
	case d.isPositiveFixNum(code), code == def.Uint8, code == def.Uint16, code == def.Uint32, code == def.Uint64:
		v, offset, err := d.asUint(offset, k)
		if err != nil {
			break // fall through to the error return
		}
		return float32(v), offset, nil
	case d.isNegativeFixNum(code), code == def.Int8, code == def.Int16, code == def.Int32, code == def.Int64:
		v, offset, err := d.asInt(offset, k)
		if err != nil {
			break // fall through to the error return
		}
		return float32(v), offset, nil
	case code == def.Nil:
		offset++
		return 0, offset, nil
	}
	return 0, 0, d.errorTemplate(code, k)
}
// asFloat64 decodes the value at 'offset' as a float64. Float32,
// integer codes and Nil are accepted and converted.
func (d *decoder) asFloat64(offset int, k reflect.Kind) (float64, int, error) {
	code := d.data[offset]
	switch {
	case code == def.Float64:
		offset++
		bs, offset := d.readSize8(offset)
		v := math.Float64frombits(binary.BigEndian.Uint64(bs))
		return v, offset, nil
	case code == def.Float32:
		offset++
		bs, offset := d.readSize4(offset)
		v := math.Float32frombits(binary.BigEndian.Uint32(bs))
		return float64(v), offset, nil
	case d.isPositiveFixNum(code), code == def.Uint8, code == def.Uint16, code == def.Uint32, code == def.Uint64:
		v, offset, err := d.asUint(offset, k)
		if err != nil {
			break // fall through to the error return
		}
		return float64(v), offset, nil
	case d.isNegativeFixNum(code), code == def.Int8, code == def.Int16, code == def.Int32, code == def.Int64:
		v, offset, err := d.asInt(offset, k)
		if err != nil {
			break // fall through to the error return
		}
		return float64(v), offset, nil
	case code == def.Nil:
		offset++
		return 0, offset, nil
	}
	return 0, 0, d.errorTemplate(code, k)
}
package decoding
import (
"encoding/binary"
"reflect"
"github.com/shamaton/msgpack/def"
)
// isPositiveFixNum reports whether 'v' is a positive fixint code.
func (d *decoder) isPositiveFixNum(v byte) bool {
	return v >= def.PositiveFixIntMin && v <= def.PositiveFixIntMax
}
func (d *decoder) isNegativeFixNum(v byte) bool {
return def.NegativeFixintMin <= int8(v) && int8(v) <= def.NegativeFixintMax
}
// asInt decodes the value at d.data[offset] as an int64. It accepts
// every msgpack integer form (positive/negative fixint and the
// Uint8..Uint64 / Int8..Int64 families) plus Nil, which decodes to 0.
// Unsigned payloads are reinterpreted through int64, so a Uint64 value
// above MaxInt64 wraps negative; callers needing range checks must
// validate separately. Returns the value, the offset past the consumed
// bytes, and an error for any other code (k is used only in the error).
func (d *decoder) asInt(offset int, k reflect.Kind) (int64, int, error) {
	code := d.data[offset]
	switch {
	case d.isPositiveFixNum(code):
		// value lives in the code byte itself
		b, offset := d.readSize1(offset)
		return int64(b), offset, nil
	case d.isNegativeFixNum(code):
		b, offset := d.readSize1(offset)
		return int64(int8(b)), offset, nil
	case code == def.Uint8:
		offset++ // skip the format byte
		b, offset := d.readSize1(offset)
		return int64(uint8(b)), offset, nil
	case code == def.Int8:
		offset++
		b, offset := d.readSize1(offset)
		return int64(int8(b)), offset, nil
	case code == def.Uint16:
		offset++
		bs, offset := d.readSize2(offset)
		v := binary.BigEndian.Uint16(bs)
		return int64(v), offset, nil
	case code == def.Int16:
		offset++
		bs, offset := d.readSize2(offset)
		v := int16(binary.BigEndian.Uint16(bs))
		return int64(v), offset, nil
	case code == def.Uint32:
		offset++
		bs, offset := d.readSize4(offset)
		v := binary.BigEndian.Uint32(bs)
		return int64(v), offset, nil
	case code == def.Int32:
		offset++
		bs, offset := d.readSize4(offset)
		v := int32(binary.BigEndian.Uint32(bs))
		return int64(v), offset, nil
	case code == def.Uint64:
		offset++
		bs, offset := d.readSize8(offset)
		// may wrap negative for values > MaxInt64
		return int64(binary.BigEndian.Uint64(bs)), offset, nil
	case code == def.Int64:
		offset++
		bs, offset := d.readSize8(offset)
		return int64(binary.BigEndian.Uint64(bs)), offset, nil
	case code == def.Nil:
		offset++
		return 0, offset, nil
	}
	return 0, 0, d.errorTemplate(code, k)
}
package decoding
import (
"reflect"
"github.com/shamaton/msgpack/def"
)
// asInterface decodes the value at offset into its natural Go
// representation: nil, bool, a sized (u)intN matching the wire width,
// float32/float64, string, []byte for bin, []interface{} for arrays,
// map[interface{}]interface{} for maps, or the value produced by a
// registered extension coder. Returns the decoded value, the offset
// just past it, and an error for any unknown code.
func (d *decoder) asInterface(offset int, k reflect.Kind) (interface{}, int, error) {
	code := d.data[offset]
	switch {
	case code == def.Nil:
		offset++
		return nil, offset, nil
	case code == def.True, code == def.False:
		v, offset, err := d.asBool(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return v, offset, nil
	// integers are narrowed to the wire width so callers see e.g.
	// uint8, not uint64
	case d.isPositiveFixNum(code), code == def.Uint8:
		v, offset, err := d.asUint(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return uint8(v), offset, err
	case code == def.Uint16:
		v, offset, err := d.asUint(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return uint16(v), offset, err
	case code == def.Uint32:
		v, offset, err := d.asUint(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return uint32(v), offset, err
	case code == def.Uint64:
		v, offset, err := d.asUint(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return v, offset, err
	case d.isNegativeFixNum(code), code == def.Int8:
		v, offset, err := d.asInt(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return int8(v), offset, err
	case code == def.Int16:
		v, offset, err := d.asInt(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return int16(v), offset, err
	case code == def.Int32:
		v, offset, err := d.asInt(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return int32(v), offset, err
	case code == def.Int64:
		v, offset, err := d.asInt(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return v, offset, err
	case code == def.Float32:
		v, offset, err := d.asFloat32(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return v, offset, err
	case code == def.Float64:
		v, offset, err := d.asFloat64(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return v, offset, err
	case d.isFixString(code), code == def.Str8, code == def.Str16, code == def.Str32:
		v, offset, err := d.asString(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return v, offset, err
	case code == def.Bin8, code == def.Bin16, code == def.Bin32:
		v, offset, err := d.asBin(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return v, offset, err
	case d.isFixSlice(code), code == def.Array16, code == def.Array32:
		// arrays decode recursively, element by element
		l, o, err := d.sliceLength(offset, k)
		if err != nil {
			return nil, 0, err
		}
		v := make([]interface{}, l)
		for i := 0; i < l; i++ {
			vv, o2, err := d.asInterface(o, k)
			if err != nil {
				return nil, 0, err
			}
			v[i] = vv
			o = o2
		}
		offset = o
		return v, offset, nil
	case d.isFixMap(code), code == def.Map16, code == def.Map32:
		// maps decode key/value pairs recursively
		l, o, err := d.mapLength(offset, k)
		if err != nil {
			return nil, 0, err
		}
		v := make(map[interface{}]interface{}, l)
		for i := 0; i < l; i++ {
			key, o2, err := d.asInterface(o, k)
			if err != nil {
				return nil, 0, err
			}
			value, o2, err := d.asInterface(o2, k)
			if err != nil {
				return nil, 0, err
			}
			v[key] = value
			o = o2
		}
		offset = o
		return v, offset, nil
	}
	/* use ext
	if d.isDateTime(offset) {
		v, offset, err := d.asDateTime(offset, k)
		if err != nil {
			return nil, 0, err
		}
		return v, offset, nil
	}
	*/
	// ext: anything not matched above is offered to the registered
	// extension coders (e.g. timestamps)
	for i := range extCoders {
		if extCoders[i].IsType(offset, &d.data) {
			v, offset, err := extCoders[i].AsValue(offset, k, &d.data)
			if err != nil {
				return nil, 0, err
			}
			return v, offset, nil
		}
	}
	return nil, 0, d.errorTemplate(code, k)
}
package decoding
import "github.com/shamaton/msgpack/def"
// isCodeNil reports whether v is the msgpack nil format code.
func (d *decoder) isCodeNil(v byte) bool {
	return v == def.Nil
}
package decoding
import (
"github.com/shamaton/msgpack/def"
)
// readSize1 returns the byte at index and the index advanced past it.
func (d *decoder) readSize1(index int) (byte, int) {
	return d.data[index], index + def.Byte1
}

// readSize2 returns the 2-byte window at index and the advanced index.
func (d *decoder) readSize2(index int) ([]byte, int) {
	return d.readSizeN(index, def.Byte2)
}

// readSize4 returns the 4-byte window at index and the advanced index.
func (d *decoder) readSize4(index int) ([]byte, int) {
	return d.readSizeN(index, def.Byte4)
}

// readSize8 returns the 8-byte window at index and the advanced index.
func (d *decoder) readSize8(index int) ([]byte, int) {
	return d.readSizeN(index, def.Byte8)
}

// readSizeN returns an n-byte sub-slice of d.data starting at index
// (aliasing the buffer, no copy) and the index advanced past it.
func (d *decoder) readSizeN(index, n int) ([]byte, int) {
	end := index + n
	return d.data[index:end], end
}
package decoding
import (
"encoding/binary"
"reflect"
"github.com/shamaton/msgpack/def"
)
// Pre-computed reflect.Types for every primitive element type handled
// by asFixedSlice's fast path; compared against rv.Type() to pick a
// concrete, reflection-free decode loop.
var (
	typeIntSlice = reflect.TypeOf([]int{})
	typeInt8Slice = reflect.TypeOf([]int8{})
	typeInt16Slice = reflect.TypeOf([]int16{})
	typeInt32Slice = reflect.TypeOf([]int32{})
	typeInt64Slice = reflect.TypeOf([]int64{})
	typeUintSlice = reflect.TypeOf([]uint{})
	typeUint8Slice = reflect.TypeOf([]uint8{})
	typeUint16Slice = reflect.TypeOf([]uint16{})
	typeUint32Slice = reflect.TypeOf([]uint32{})
	typeUint64Slice = reflect.TypeOf([]uint64{})
	typeFloat32Slice = reflect.TypeOf([]float32{})
	typeFloat64Slice = reflect.TypeOf([]float64{})
	typeStringSlice = reflect.TypeOf([]string{})
	typeBoolSlice = reflect.TypeOf([]bool{})
)
// isFixSlice reports whether v is a fixarray code (length 0..15 is
// embedded in the low nibble).
func (d *decoder) isFixSlice(v byte) bool {
	return v >= def.FixArray && v <= def.FixArray+0x0f
}
// sliceLength reads an array header at offset and returns the element
// count plus the offset of the first element. Errors on non-array codes.
func (d *decoder) sliceLength(offset int, k reflect.Kind) (int, int, error) {
	code, offset := d.readSize1(offset)
	if d.isFixSlice(code) {
		// length packed into the code byte
		return int(code - def.FixArray), offset, nil
	}
	if code == def.Array16 {
		bs, next := d.readSize2(offset)
		return int(binary.BigEndian.Uint16(bs)), next, nil
	}
	if code == def.Array32 {
		bs, next := d.readSize4(offset)
		return int(binary.BigEndian.Uint32(bs)), next, nil
	}
	return 0, 0, d.errorTemplate(code, k)
}
// asFixedSlice fast-paths decoding into slices of primitive element
// types. If rv's type matches one of the precomputed typeXxxSlice
// types, it allocates the slice, decodes l elements, stores the result
// into rv, and returns (newOffset, true, nil). For any other slice
// type it returns (offset, false, nil) so the caller takes the generic
// reflection-based path. Each case is the same loop specialized to one
// element type to avoid per-element reflection.
func (d *decoder) asFixedSlice(rv reflect.Value, offset int, l int) (int, bool, error) {
	t := rv.Type()
	k := t.Elem().Kind()
	switch t {
	case typeIntSlice:
		sli := make([]int, l)
		for i := range sli {
			v, o, err := d.asInt(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = int(v)
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeUintSlice:
		sli := make([]uint, l)
		for i := range sli {
			v, o, err := d.asUint(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = uint(v)
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeStringSlice:
		sli := make([]string, l)
		for i := range sli {
			v, o, err := d.asString(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = v
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeBoolSlice:
		sli := make([]bool, l)
		for i := range sli {
			v, o, err := d.asBool(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = v
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeFloat32Slice:
		sli := make([]float32, l)
		for i := range sli {
			v, o, err := d.asFloat32(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = v
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeFloat64Slice:
		sli := make([]float64, l)
		for i := range sli {
			v, o, err := d.asFloat64(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = v
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeInt8Slice:
		sli := make([]int8, l)
		for i := range sli {
			v, o, err := d.asInt(offset, k)
			if err != nil {
				return 0, false, err
			}
			// narrowing truncates values outside int8 range
			sli[i] = int8(v)
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeInt16Slice:
		sli := make([]int16, l)
		for i := range sli {
			v, o, err := d.asInt(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = int16(v)
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeInt32Slice:
		sli := make([]int32, l)
		for i := range sli {
			v, o, err := d.asInt(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = int32(v)
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeInt64Slice:
		sli := make([]int64, l)
		for i := range sli {
			v, o, err := d.asInt(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = v
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeUint8Slice:
		sli := make([]uint8, l)
		for i := range sli {
			v, o, err := d.asUint(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = uint8(v)
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeUint16Slice:
		sli := make([]uint16, l)
		for i := range sli {
			v, o, err := d.asUint(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = uint16(v)
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeUint32Slice:
		sli := make([]uint32, l)
		for i := range sli {
			v, o, err := d.asUint(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = uint32(v)
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	case typeUint64Slice:
		sli := make([]uint64, l)
		for i := range sli {
			v, o, err := d.asUint(offset, k)
			if err != nil {
				return 0, false, err
			}
			sli[i] = v
			offset = o
		}
		rv.Set(reflect.ValueOf(sli))
		return offset, true, nil
	}
	// not a recognized primitive slice type; caller must decode generically
	return offset, false, nil
}
package decoding
import (
"encoding/binary"
"reflect"
"unsafe"
"github.com/shamaton/msgpack/def"
)
// emptyString / emptyBytes are shared zero-value results, returned on
// error and empty-payload paths to avoid allocating.
var emptyString = ""
var emptyBytes = []byte{}

// isCodeString reports whether code marks any msgpack string form
// (fixstr or str8/16/32).
func (d *decoder) isCodeString(code byte) bool {
	switch {
	case d.isFixString(code):
		return true
	case code == def.Str8, code == def.Str16, code == def.Str32:
		return true
	}
	return false
}

// isFixString reports whether v is a fixstr code (length 0..31
// embedded in the low five bits).
func (d *decoder) isFixString(v byte) bool {
	return v >= def.FixStr && v <= def.FixStr+0x1f
}
// stringByteLength reads a string header at offset and returns the
// payload length in bytes plus the offset of the first payload byte.
// Nil decodes as a zero-length string; other codes are an error.
func (d *decoder) stringByteLength(offset int, k reflect.Kind) (int, int, error) {
	code := d.data[offset]
	offset++ // past the format byte
	switch {
	case def.FixStr <= code && code <= def.FixStr+0x1f:
		// length packed into the code byte
		return int(code - def.FixStr), offset, nil
	case code == def.Str8:
		b, next := d.readSize1(offset)
		return int(b), next, nil
	case code == def.Str16:
		bs, next := d.readSize2(offset)
		return int(binary.BigEndian.Uint16(bs)), next, nil
	case code == def.Str32:
		bs, next := d.readSize4(offset)
		return int(binary.BigEndian.Uint32(bs)), next, nil
	case code == def.Nil:
		return 0, offset, nil
	}
	return 0, 0, d.errorTemplate(code, k)
}
// asString decodes a msgpack string at offset and returns it together
// with the offset past the payload.
// NOTE: the returned string aliases d.data via an unsafe zero-copy
// conversion — mutating the source buffer afterwards would corrupt the
// string. Safe here only as long as callers treat d.data as immutable.
func (d *decoder) asString(offset int, k reflect.Kind) (string, int, error) {
	l, offset, err := d.stringByteLength(offset, k)
	if err != nil {
		return emptyString, 0, err
	}
	bs, offset := d.asStringByte(offset, l, k)
	// zero-copy []byte -> string conversion
	return *(*string)(unsafe.Pointer(&bs)), offset, nil
}
// asStringByte returns the l payload bytes at offset (aliasing d.data)
// and the advanced offset; a non-positive length yields the shared
// empty slice without consuming anything.
func (d *decoder) asStringByte(offset int, l int, k reflect.Kind) ([]byte, int) {
	if l >= 1 {
		return d.readSizeN(offset, l)
	}
	return emptyBytes, offset
}
package decoding
import (
"encoding/binary"
"reflect"
"sync"
"github.com/shamaton/msgpack/def"
)
// structCacheTypeMap caches, per struct type, the mapping from msgpack
// map key (the field's encoded name) to its struct field index.
type structCacheTypeMap struct {
	m map[string]int
}

// structCacheTypeArray caches, per struct type, the ordered list of
// decodable field indexes for array-encoded structs.
type structCacheTypeArray struct {
	m []int
}

// Process-wide caches keyed by reflect.Type, holding the entries above
// so field metadata is computed once per struct type.
var mapSCTM = sync.Map{}
var mapSCTA = sync.Map{}
// setStruct decodes the value at offset into the struct rv and returns
// the offset past it. Registered extension coders are consulted first
// (so ext-encoded values win over plain struct decoding); otherwise
// the struct is decoded from an array layout when d.asArray is set,
// else from a map layout.
func (d *decoder) setStruct(rv reflect.Value, offset int, k reflect.Kind) (int, error) {
	/*
		if d.isDateTime(offset) {
			dt, offset, err := d.asDateTime(offset, k)
			if err != nil {
				return 0, err
			}
			rv.Set(reflect.ValueOf(dt))
			return offset, nil
		}
	*/
	// extension coders take precedence over struct field decoding
	for i := range extCoders {
		if extCoders[i].IsType(offset, &d.data) {
			v, offset, err := extCoders[i].AsValue(offset, k, &d.data)
			if err != nil {
				return 0, err
			}
			rv.Set(reflect.ValueOf(v))
			return offset, nil
		}
	}
	if d.asArray {
		return d.setStructFromArray(rv, offset, k)
	}
	return d.setStructFromMap(rv, offset, k)
}
// setStructFromArray decodes an array-encoded struct at offset into rv
// and returns the offset past it. Field indexes are cached per struct
// type in mapSCTA so reflection over the fields happens once per type;
// array elements beyond the cached field count are skipped.
func (d *decoder) setStructFromArray(rv reflect.Value, offset int, k reflect.Kind) (int, error) {
	// get length
	l, o, err := d.sliceLength(offset, k)
	if err != nil {
		return 0, err
	}

	// find or create the per-type field-index cache. rv.Type() is
	// hoisted so the reflect.Type is fetched once instead of per use.
	t := rv.Type()
	var scta *structCacheTypeArray
	if cache, ok := mapSCTA.Load(t); ok {
		scta = cache.(*structCacheTypeArray)
	} else {
		scta = &structCacheTypeArray{}
		for i := 0; i < t.NumField(); i++ {
			if ok, _ := d.CheckField(t.Field(i)); ok {
				scta.m = append(scta.m, i)
			}
		}
		// NOTE: concurrent first decodes of the same type may both
		// build and Store; last write wins, results are equivalent.
		mapSCTA.Store(t, scta)
	}

	// decode each array element into its field; surplus elements in
	// the wire data are skipped without error.
	for i := 0; i < l; i++ {
		if i < len(scta.m) {
			o, err = d.decode(rv.Field(scta.m[i]), o)
			if err != nil {
				return 0, err
			}
		} else {
			o = d.jumpOffset(o)
		}
	}
	return o, nil
}
// setStructFromMap decodes a map-encoded struct at offset into rv and
// returns the offset past it. The name->field-index mapping is cached
// per struct type in mapSCTM; keys with no matching field are skipped.
func (d *decoder) setStructFromMap(rv reflect.Value, offset int, k reflect.Kind) (int, error) {
	// get length
	l, o, err := d.mapLength(offset, k)
	if err != nil {
		return 0, err
	}

	// find or create the per-type name->index cache. rv.Type() is
	// hoisted so the reflect.Type is fetched once instead of per use.
	t := rv.Type()
	var sctm *structCacheTypeMap
	if cache, ok := mapSCTM.Load(t); ok {
		sctm = cache.(*structCacheTypeMap)
	} else {
		sctm = &structCacheTypeMap{m: map[string]int{}}
		for i := 0; i < t.NumField(); i++ {
			if ok, name := d.CheckField(t.Field(i)); ok {
				sctm.m[name] = i
			}
		}
		mapSCTM.Store(t, sctm)
	}

	// decode key/value pairs; unknown keys are skipped.
	for i := 0; i < l; i++ {
		key, o2, err := d.asString(o, k)
		if err != nil {
			return 0, err
		}
		// single map lookup (previously checked existence and then
		// indexed the map a second time)
		if idx, ok := sctm.m[key]; ok {
			o2, err = d.decode(rv.Field(idx), o2)
			if err != nil {
				return 0, err
			}
		} else {
			o2 = d.jumpOffset(o2)
		}
		o = o2
	}
	return o, nil
}
// jumpOffset skips exactly one complete msgpack value starting at
// offset and returns the offset of the next value. Containers are
// skipped by recursing per element; str/bin/ext payloads are skipped by
// their declared length. A code matching no case advances only past
// the format byte already consumed.
func (d *decoder) jumpOffset(offset int) int {
	code, offset := d.readSize1(offset)
	switch {
	// single-byte values: the code byte is the whole value
	case code == def.True, code == def.False, code == def.Nil:
		// do nothing
	case d.isPositiveFixNum(code) || d.isNegativeFixNum(code):
		// do nothing
	// fixed-width numbers: skip the payload
	case code == def.Uint8, code == def.Int8:
		offset += def.Byte1
	case code == def.Uint16, code == def.Int16:
		offset += def.Byte2
	case code == def.Uint32, code == def.Int32, code == def.Float32:
		offset += def.Byte4
	case code == def.Uint64, code == def.Int64, code == def.Float64:
		offset += def.Byte8
	// str/bin: read the length field, then skip that many bytes
	case d.isFixString(code):
		offset += int(code - def.FixStr)
	case code == def.Str8, code == def.Bin8:
		b, o := d.readSize1(offset)
		o += int(b)
		offset = o
	case code == def.Str16, code == def.Bin16:
		bs, o := d.readSize2(offset)
		o += int(binary.BigEndian.Uint16(bs))
		offset = o
	case code == def.Str32, code == def.Bin32:
		bs, o := d.readSize4(offset)
		o += int(binary.BigEndian.Uint32(bs))
		offset = o
	// arrays: skip each element recursively
	case d.isFixSlice(code):
		l := int(code - def.FixArray)
		for i := 0; i < l; i++ {
			offset = d.jumpOffset(offset)
		}
	case code == def.Array16:
		bs, o := d.readSize2(offset)
		l := int(binary.BigEndian.Uint16(bs))
		for i := 0; i < l; i++ {
			o = d.jumpOffset(o)
		}
		offset = o
	case code == def.Array32:
		bs, o := d.readSize4(offset)
		l := int(binary.BigEndian.Uint32(bs))
		for i := 0; i < l; i++ {
			o = d.jumpOffset(o)
		}
		offset = o
	// maps: l pairs => 2*l values to skip
	case d.isFixMap(code):
		l := int(code - def.FixMap)
		for i := 0; i < l*2; i++ {
			offset = d.jumpOffset(offset)
		}
	case code == def.Map16:
		bs, o := d.readSize2(offset)
		l := int(binary.BigEndian.Uint16(bs))
		for i := 0; i < l*2; i++ {
			o = d.jumpOffset(o)
		}
		offset = o
	case code == def.Map32:
		bs, o := d.readSize4(offset)
		l := int(binary.BigEndian.Uint32(bs))
		for i := 0; i < l*2; i++ {
			o = d.jumpOffset(o)
		}
		offset = o
	// ext: 1 type byte plus the fixed or declared payload size
	case code == def.Fixext1:
		offset += def.Byte1 + def.Byte1
	case code == def.Fixext2:
		offset += def.Byte1 + def.Byte2
	case code == def.Fixext4:
		offset += def.Byte1 + def.Byte4
	case code == def.Fixext8:
		offset += def.Byte1 + def.Byte8
	case code == def.Fixext16:
		offset += def.Byte1 + def.Byte16
	case code == def.Ext8:
		b, o := d.readSize1(offset)
		o += def.Byte1 + int(b)
		offset = o
	case code == def.Ext16:
		bs, o := d.readSize2(offset)
		o += def.Byte1 + int(binary.BigEndian.Uint16(bs))
		offset = o
	case code == def.Ext32:
		bs, o := d.readSize4(offset)
		o += def.Byte1 + int(binary.BigEndian.Uint32(bs))
		offset = o
	}
	return offset
}
package decoding
import (
"encoding/binary"
"reflect"
"github.com/shamaton/msgpack/def"
)
// asUint decodes the value at d.data[offset] as a uint64. Every
// msgpack integer form plus Nil (-> 0) is accepted; signed negative
// payloads are reinterpreted through uint64 and therefore wrap (e.g.
// int8 -1 becomes MaxUint64) — callers needing sign checks must
// validate separately. Returns the value, the offset past the consumed
// bytes, and an error for any other code.
func (d *decoder) asUint(offset int, k reflect.Kind) (uint64, int, error) {
	code := d.data[offset]
	switch {
	case d.isPositiveFixNum(code):
		// value lives in the code byte itself
		b, offset := d.readSize1(offset)
		return uint64(b), offset, nil
	case d.isNegativeFixNum(code):
		// negative fixint: sign-extends then wraps to a huge uint64
		b, offset := d.readSize1(offset)
		return uint64(int8(b)), offset, nil
	case code == def.Uint8:
		offset++ // skip the format byte
		b, offset := d.readSize1(offset)
		return uint64(uint8(b)), offset, nil
	case code == def.Int8:
		offset++
		b, offset := d.readSize1(offset)
		return uint64(int8(b)), offset, nil
	case code == def.Uint16:
		offset++
		bs, offset := d.readSize2(offset)
		v := binary.BigEndian.Uint16(bs)
		return uint64(v), offset, nil
	case code == def.Int16:
		offset++
		bs, offset := d.readSize2(offset)
		v := int16(binary.BigEndian.Uint16(bs))
		return uint64(v), offset, nil
	case code == def.Uint32:
		offset++
		bs, offset := d.readSize4(offset)
		v := binary.BigEndian.Uint32(bs)
		return uint64(v), offset, nil
	case code == def.Int32:
		offset++
		bs, offset := d.readSize4(offset)
		v := int32(binary.BigEndian.Uint32(bs))
		return uint64(v), offset, nil
	case code == def.Uint64:
		offset++
		bs, offset := d.readSize8(offset)
		return binary.BigEndian.Uint64(bs), offset, nil
	case code == def.Int64:
		offset++
		bs, offset := d.readSize8(offset)
		return binary.BigEndian.Uint64(bs), offset, nil
	case code == def.Nil:
		offset++
		return 0, offset, nil
	}
	return 0, 0, d.errorTemplate(code, k)
}
package encoding
import "github.com/shamaton/msgpack/def"
// calcBool returns the extra payload size of a bool: zero, because the
// whole value fits in the format byte.
func (e *encoder) calcBool() int {
	return 0
}

// writeBool writes the single-byte True/False code at offset and
// returns the advanced offset.
func (e *encoder) writeBool(v bool, offset int) int {
	code := def.False
	if v {
		code = def.True
	}
	return e.setByte1Int(code, offset)
}
package encoding
import (
"fmt"
"math"
"reflect"
"github.com/shamaton/msgpack/def"
)
// typeByte caches reflect.TypeOf(byte(0)) for element-type checks.
var typeByte = reflect.TypeOf(byte(0))

// isByteSlice reports whether rv's element type is byte, i.e. the
// value is a []byte / []uint8 and should be encoded as bin.
func (e *encoder) isByteSlice(rv reflect.Value) bool {
	return typeByte == rv.Type().Elem()
}
// calcByteSlice returns the size of a bin body for an l-byte payload:
// the smallest fitting length field (1, 2 or 4 bytes) plus l. Lengths
// above MaxUint32 are not representable and yield an error.
func (e *encoder) calcByteSlice(l int) (int, error) {
	switch {
	case l <= math.MaxUint8:
		return def.Byte1 + l, nil
	case l <= math.MaxUint16:
		return def.Byte2 + l, nil
	case uint(l) <= math.MaxUint32:
		return def.Byte4 + l, nil
	}
	// not supported error
	return 0, fmt.Errorf("not support this array length : %d", l)
}
// writeByteSliceLength writes the bin header (format byte plus length
// field) for an l-byte payload, picking the smallest of Bin8/16/32.
// A length above MaxUint32 writes nothing and returns offset unchanged
// — calcByteSlice is expected to have rejected it already.
func (e *encoder) writeByteSliceLength(l int, offset int) int {
	switch {
	case l <= math.MaxUint8:
		offset = e.setByte1Int(def.Bin8, offset)
		offset = e.setByte1Int(l, offset)
	case l <= math.MaxUint16:
		offset = e.setByte1Int(def.Bin16, offset)
		offset = e.setByte2Int(l, offset)
	case uint(l) <= math.MaxUint32:
		offset = e.setByte1Int(def.Bin32, offset)
		offset = e.setByte4Int(l, offset)
	}
	return offset
}
package encoding
import (
"reflect"
"github.com/shamaton/msgpack/ext"
"github.com/shamaton/msgpack/time"
)
// extCoderMap indexes registered extension encoders by the concrete Go
// type they handle; the built-in time encoder is always present.
var extCoderMap = map[reflect.Type]ext.Encoder{time.Encoder.Type(): time.Encoder}
// extCoders is the flat iteration view of extCoderMap, rebuilt by
// updateExtCoders whenever the map changes.
var extCoders = []ext.Encoder{time.Encoder}
// AddExtEncoder adds encoders for extension types. Registering a
// second encoder for an already-registered type is a no-op, as is any
// attempt to replace the built-in time encoder.
func AddExtEncoder(f ext.Encoder) {
	// the time encoder is built in and must not be overridden
	if f.Type() == time.Encoder.Type() {
		return
	}
	if _, registered := extCoderMap[f.Type()]; !registered {
		extCoderMap[f.Type()] = f
		updateExtCoders()
	}
}
// RemoveExtEncoder removes encoders for extension types. Removing an
// unregistered encoder is a no-op; the built-in time encoder can never
// be removed.
func RemoveExtEncoder(f ext.Encoder) {
	// the time encoder is built in and must stay registered
	if f.Type() == time.Encoder.Type() {
		return
	}
	if _, registered := extCoderMap[f.Type()]; registered {
		delete(extCoderMap, f.Type())
		updateExtCoders()
	}
}
// updateExtCoders rebuilds the extCoders slice from extCoderMap. The
// resulting order is unspecified (map iteration order), as before.
// Iterating key AND value in one pass replaces the previous pattern of
// ranging over keys and indexing the map a second time per entry.
func updateExtCoders() {
	extCoders = make([]ext.Encoder, 0, len(extCoderMap))
	for _, enc := range extCoderMap {
		extCoders = append(extCoders, enc)
	}
}
/*
func (e *encoder) isDateTime(value reflect.Value) (bool, time.Time) {
i := value.Interface()
switch t := i.(type) {
case time.Time:
return true, t
}
return false, now
}
func (e *encoder) calcTime(t time.Time) int {
secs := uint64(t.Unix())
if secs>>34 == 0 {
data := uint64(t.Nanosecond())<<34 | secs
if data&0xffffffff00000000 == 0 {
return def.Byte1 + def.Byte4
}
return def.Byte1 + def.Byte8
}
return def.Byte1 + def.Byte1 + def.Byte4 + def.Byte8
}
func (e *encoder) writeTime(t time.Time, offset int) int {
secs := uint64(t.Unix())
if secs>>34 == 0 {
data := uint64(t.Nanosecond())<<34 | secs
if data&0xffffffff00000000 == 0 {
offset = e.setByte1Int(def.Fixext4, offset)
offset = e.setByte1Int(def.TimeStamp, offset)
offset = e.setByte4Uint64(data, offset)
return offset
}
offset = e.setByte1Int(def.Fixext8, offset)
offset = e.setByte1Int(def.TimeStamp, offset)
offset = e.setByte8Uint64(data, offset)
return offset
}
offset = e.setByte1Int(def.Ext8, offset)
offset = e.setByte1Int(12, offset)
offset = e.setByte1Int(def.TimeStamp, offset)
offset = e.setByte4Int(t.Nanosecond(), offset)
offset = e.setByte8Uint64(secs, offset)
return offset
}
*/
package encoding
import (
"math"
"github.com/shamaton/msgpack/def"
)
// calcFloat32 returns the payload size of a float32 body (the format
// byte is accounted separately).
func (e *encoder) calcFloat32(v float64) int {
	return def.Byte4
}

// calcFloat64 returns the payload size of a float64 body.
func (e *encoder) calcFloat64(v float64) int {
	return def.Byte8
}

// writeFloat32 stores v as a Float32 code followed by the big-endian
// IEEE-754 bits of float32(v).
func (e *encoder) writeFloat32(v float64, offset int) int {
	bits := uint64(math.Float32bits(float32(v)))
	return e.setByte4Uint64(bits, e.setByte1Int(def.Float32, offset))
}

// writeFloat64 stores v as a Float64 code followed by its big-endian
// IEEE-754 bits.
func (e *encoder) writeFloat64(v float64, offset int) int {
	return e.setByte8Uint64(math.Float64bits(v), e.setByte1Int(def.Float64, offset))
}
package encoding
import (
"math"
"github.com/shamaton/msgpack/def"
)
// isPositiveFixInt64 reports whether v fits the positive fixint range.
func (e *encoder) isPositiveFixInt64(v int64) bool {
	return v >= def.PositiveFixIntMin && v <= def.PositiveFixIntMax
}

// isNegativeFixInt64 reports whether v fits the negative fixint range.
func (e *encoder) isNegativeFixInt64(v int64) bool {
	return v >= def.NegativeFixintMin && v <= def.NegativeFixintMax
}
// calcInt returns the payload size needed for v beyond the format
// byte; non-negative values defer to the unsigned sizing rules.
func (e *encoder) calcInt(v int64) int {
	if v >= 0 {
		return e.calcUint(uint64(v))
	}
	switch {
	case e.isNegativeFixInt64(v):
		// format code only
		return 0
	case v >= math.MinInt8:
		return def.Byte1
	case v >= math.MinInt16:
		return def.Byte2
	case v >= math.MinInt32:
		return def.Byte4
	default:
		return def.Byte8
	}
}
// writeInt writes v in its smallest msgpack integer encoding and
// returns the advanced offset. Non-negative values use the unsigned
// forms via writeUint; negative fixints are a single code byte.
func (e *encoder) writeInt(v int64, offset int) int {
	switch {
	case v >= 0:
		return e.writeUint(uint64(v), offset)
	case e.isNegativeFixInt64(v):
		return e.setByte1Int64(v, offset)
	case v >= math.MinInt8:
		offset = e.setByte1Int(def.Int8, offset)
		return e.setByte1Int64(v, offset)
	case v >= math.MinInt16:
		offset = e.setByte1Int(def.Int16, offset)
		return e.setByte2Int64(v, offset)
	case v >= math.MinInt32:
		offset = e.setByte1Int(def.Int32, offset)
		return e.setByte4Int64(v, offset)
	default:
		offset = e.setByte1Int(def.Int64, offset)
		return e.setByte8Int64(v, offset)
	}
}
package encoding
import "github.com/shamaton/msgpack/def"
// writeNil writes the single-byte nil code at offset and returns the
// advanced offset.
func (e *encoder) writeNil(offset int) int {
	return e.setByte1Int(def.Nil, offset)
}
package encoding
// Big-endian store helpers. Each writes the lowest N bytes of value
// into e.d starting at offset, most significant byte first, and
// returns the position just past the written bytes. The caller is
// responsible for having sized e.d; out-of-range writes panic.

func (e *encoder) setByte1Int64(value int64, offset int) int {
	e.d[offset] = byte(value)
	return offset + 1
}

func (e *encoder) setByte2Int64(value int64, offset int) int {
	for i := 0; i < 2; i++ {
		e.d[offset+i] = byte(value >> uint(8*(1-i)))
	}
	return offset + 2
}

func (e *encoder) setByte4Int64(value int64, offset int) int {
	for i := 0; i < 4; i++ {
		e.d[offset+i] = byte(value >> uint(8*(3-i)))
	}
	return offset + 4
}

func (e *encoder) setByte8Int64(value int64, offset int) int {
	for i := 0; i < 8; i++ {
		e.d[offset+i] = byte(value >> uint(8*(7-i)))
	}
	return offset + 8
}

func (e *encoder) setByte1Uint64(value uint64, offset int) int {
	e.d[offset] = byte(value)
	return offset + 1
}

func (e *encoder) setByte2Uint64(value uint64, offset int) int {
	for i := 0; i < 2; i++ {
		e.d[offset+i] = byte(value >> uint(8*(1-i)))
	}
	return offset + 2
}

func (e *encoder) setByte4Uint64(value uint64, offset int) int {
	for i := 0; i < 4; i++ {
		e.d[offset+i] = byte(value >> uint(8*(3-i)))
	}
	return offset + 4
}

func (e *encoder) setByte8Uint64(value uint64, offset int) int {
	for i := 0; i < 8; i++ {
		e.d[offset+i] = byte(value >> uint(8*(7-i)))
	}
	return offset + 8
}

func (e *encoder) setByte1Int(code, offset int) int {
	e.d[offset] = byte(code)
	return offset + 1
}

func (e *encoder) setByte2Int(value int, offset int) int {
	for i := 0; i < 2; i++ {
		e.d[offset+i] = byte(value >> uint(8*(1-i)))
	}
	return offset + 2
}

func (e *encoder) setByte4Int(value int, offset int) int {
	for i := 0; i < 4; i++ {
		e.d[offset+i] = byte(value >> uint(8*(3-i)))
	}
	return offset + 4
}

func (e *encoder) setByte4Uint32(value uint32, offset int) int {
	for i := 0; i < 4; i++ {
		e.d[offset+i] = byte(value >> uint(8*(3-i)))
	}
	return offset + 4
}
// setBytes copies bs into e.d at offset and returns the offset just
// past the copied bytes. The explicit high bound keeps the original
// panic behavior when e.d is too short, while copy lowers to a single
// memmove instead of the former byte-by-byte loop.
func (e *encoder) setBytes(bs []byte, offset int) int {
	copy(e.d[offset:offset+len(bs)], bs)
	return offset + len(bs)
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Copyright (c) 2015, someonegg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
// empty .s so `go build` does not use -complete for go:linkname to work
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment