From b06cf141564ff764a756b879f9b74d368c99bb94 Mon Sep 17 00:00:00 2001
From: Ricardo Galli
Date: Sat, 3 Jun 2017 20:45:21 +0200
Subject: [PATCH 01/13] Autotuning low and high bins' limits.

It solves the problem with buffers larger than 2^25 bytes (32MB): the
histogram of calls is skewed to the right, and performance becomes very
poor if the 95th percentile is reached at the last bin.
---
 pool.go      | 45 ++++++++++++++++++++++++++++++++++++++-------
 pool_test.go | 20 ++++++++++----------
 2 files changed, 48 insertions(+), 17 deletions(-)

diff --git a/pool.go b/pool.go
index 8bb4134..4d496ab 100644
--- a/pool.go
+++ b/pool.go
@@ -7,11 +7,11 @@ import (
 )
 
 const (
-	minBitSize = 6 // 2**6=64 is a CPU cache line size
-	steps      = 20
+	defaultMinBitSize = 6 // 2**6=64 is a CPU cache line size
+	steps             = 20
 
-	minSize = 1 << minBitSize
-	maxSize = 1 << (minBitSize + steps - 1)
+	defaultMinSize = 1 << defaultMinBitSize
+	defaultMaxSize = 1 << (defaultMinBitSize + steps - 1)
 
 	calibrateCallsThreshold = 42000
 	maxPercentile           = 0.95
@@ -29,6 +29,9 @@ type Pool struct {
 	defaultSize uint64
 	maxSize     uint64
 
+	minBitSize uint64
+	minSize    uint64
+
 	pool sync.Pool
 }
 
@@ -65,7 +68,11 @@ func Put(b *ByteBuffer) { defaultPool.Put(b) }
 //
 // The buffer mustn't be accessed after returning to the pool.
 func (p *Pool) Put(b *ByteBuffer) {
-	idx := index(len(b.B))
+	if p.minBitSize == 0 {
+		p.initBins()
+	}
+
+	idx := index(p.minBitSize, len(b.B))
 
 	if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
 		p.calibrate()
@@ -83,6 +90,10 @@ func (p *Pool) calibrate() {
 		return
 	}
 
+	if p.minBitSize == 0 {
+		p.initBins()
+	}
+
 	a := make(callSizes, 0, steps)
 	var callsSum uint64
 	for i := uint64(0); i < steps; i++ {
@@ -90,9 +101,19 @@ func (p *Pool) calibrate() {
 		callsSum += calls
 		a = append(a, callSize{
 			calls: calls,
-			size:  minSize << i,
+			size:  p.minSize << i,
 		})
 	}
+	if p.minBitSize+steps < 32 && a[steps-1].calls > a[0].calls {
+		// Increase the first bin's size
+		p.resizeBins(p.minBitSize + 1)
+	} else if p.minBitSize > defaultMinBitSize &&
+		a[0].calls > 0 &&
+		a[steps-2].calls == 0 &&
+		a[steps-1].calls == 0 {
+		// Decrease the first bin's size
+		p.resizeBins(p.minBitSize - 1)
+	}
 	sort.Sort(a)
 
 	defaultSize := a[0].size
@@ -117,6 +138,16 @@ func (p *Pool) calibrate() {
 	atomic.StoreUint64(&p.calibrating, 0)
 }
 
+func (p *Pool) resizeBins(minBitSize uint64) {
+	atomic.StoreUint64(&p.minBitSize, minBitSize)
+	atomic.StoreUint64(&p.minSize, 1<<minBitSize)
+}
+
+func (p *Pool) initBins() {
+	atomic.StoreUint64(&p.minBitSize, defaultMinBitSize)
+	atomic.StoreUint64(&p.minSize, defaultMinSize)
+}
+
-func index(n int) int {
+func index(minBitSize uint64, n int) int {
 	n--
 	n >>= minBitSize
 	idx := 0
diff --git a/pool_test.go b/pool_test.go
index 6d3bcb8..b357319 100644
--- a/pool_test.go
+++ b/pool_test.go
@@ -10,21 +10,21 @@ func TestIndex(t *testing.T) {
 	testIndex(t, 0, 0)
 	testIndex(t, 1, 0)
 
-	testIndex(t, minSize-1, 0)
-	testIndex(t, minSize, 0)
-	testIndex(t, minSize+1, 1)
+	testIndex(t, defaultMinSize-1, 0)
+	testIndex(t, defaultMinSize, 0)
+	testIndex(t, defaultMinSize+1, 1)
 
-	testIndex(t, 2*minSize-1, 1)
-	testIndex(t, 2*minSize, 1)
-	testIndex(t, 2*minSize+1, 2)
+	testIndex(t, 2*defaultMinSize-1, 1)
+	testIndex(t, 2*defaultMinSize, 1)
+	testIndex(t, 2*defaultMinSize+1, 2)
 
-	testIndex(t, maxSize-1, steps-1)
-	testIndex(t, maxSize, steps-1)
-	testIndex(t, maxSize+1, steps-1)
+	testIndex(t, defaultMaxSize-1, steps-1)
+	testIndex(t, defaultMaxSize, steps-1)
+	testIndex(t, defaultMaxSize+1, steps-1)
 }
 
 func testIndex(t *testing.T, n, expectedIdx int) {
-	idx := index(n)
+	idx := index(defaultMinBitSize, n)
 	if idx != expectedIdx {
 		t.Fatalf("unexpected idx for n=%d: %d. 
Expecting %d", n, idx, expectedIdx) } From 02c4518f457b5a9d21cfa46375ec3ef1887aeb1a Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Fri, 24 Nov 2017 01:56:13 +0100 Subject: [PATCH 02/13] Use shortlivedpool --- pool.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pool.go b/pool.go index 4d496ab..8a071e6 100644 --- a/pool.go +++ b/pool.go @@ -2,8 +2,9 @@ package bytebufferpool import ( "sort" - "sync" "sync/atomic" + + "github.com/gallir/shortlivedpool" ) const ( @@ -32,7 +33,7 @@ type Pool struct { minBitSize uint64 minSize uint64 - pool sync.Pool + pool shortlivedpool.Pool } var defaultPool Pool From a6bad013649192169ab24f9dc34ef787e9a6a04a Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Fri, 24 Nov 2017 17:18:54 +0100 Subject: [PATCH 03/13] Added conditional compilation of shortlived pool --- pool.go | 20 -------------------- shortlivedpool.go | 31 +++++++++++++++++++++++++++++++ standardpool.go | 30 ++++++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 20 deletions(-) create mode 100644 shortlivedpool.go create mode 100644 standardpool.go diff --git a/pool.go b/pool.go index 8a071e6..aca4f5b 100644 --- a/pool.go +++ b/pool.go @@ -3,8 +3,6 @@ package bytebufferpool import ( "sort" "sync/atomic" - - "github.com/gallir/shortlivedpool" ) const ( @@ -18,24 +16,6 @@ const ( maxPercentile = 0.95 ) -// Pool represents byte buffer pool. -// -// Distinct pools may be used for distinct types of byte buffers. -// Properly determined byte buffer types with their own pools may help reducing -// memory waste. -type Pool struct { - calls [steps]uint64 - calibrating uint64 - - defaultSize uint64 - maxSize uint64 - - minBitSize uint64 - minSize uint64 - - pool shortlivedpool.Pool -} - var defaultPool Pool // Get returns an empty byte buffer from the pool. diff --git a/shortlivedpool.go b/shortlivedpool.go new file mode 100644 index 0000000..929cd2e --- /dev/null +++ b/shortlivedpool.go @@ -0,0 +1,31 @@ +// +build shortlivedpool + +package bytebufferpool + +import ( + "log" + + "github.com/gallir/shortlivedpool" +) + +// Pool represents byte buffer pool. +// +// Distinct pools may be used for distinct types of byte buffers. +// Properly determined byte buffer types with their own pools may help reducing +// memory waste. +type Pool struct { + calls [steps]uint64 + calibrating uint64 + + defaultSize uint64 + maxSize uint64 + + minBitSize uint64 + minSize uint64 + + pool shortlivedpool.Pool +} + +func init() { + log.Println("Using github.com/gallir/shortlivedpooll") +} diff --git a/standardpool.go b/standardpool.go new file mode 100644 index 0000000..f96ec92 --- /dev/null +++ b/standardpool.go @@ -0,0 +1,30 @@ +// +build !shortlivedpool + +package bytebufferpool + +import ( + "log" + "sync" +) + +// Pool represents byte buffer pool. +// +// Distinct pools may be used for distinct types of byte buffers. +// Properly determined byte buffer types with their own pools may help reducing +// memory waste. 
+type Pool struct { + calls [steps]uint64 + calibrating uint64 + + defaultSize uint64 + maxSize uint64 + + minBitSize uint64 + minSize uint64 + + pool sync.Pool +} + +func init() { + log.Println("Using standard sync.pool") +} From 3daaec9e37fdb2d519a7fdb0a09adf6064a4bfd7 Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Fri, 24 Nov 2017 17:26:53 +0100 Subject: [PATCH 04/13] Fixed typo --- shortlivedpool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shortlivedpool.go b/shortlivedpool.go index 929cd2e..e6241bc 100644 --- a/shortlivedpool.go +++ b/shortlivedpool.go @@ -27,5 +27,5 @@ type Pool struct { } func init() { - log.Println("Using github.com/gallir/shortlivedpooll") + log.Println("Using github.com/gallir/shortlivedpool instead of sync.pool") } From 4c5299ea36d31095a58427d6a9c747d26328ed1b Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Fri, 24 Nov 2017 17:45:15 +0100 Subject: [PATCH 05/13] Use embedded --- pool.go | 18 ++++++++++++++++++ shortlivedpool.go | 16 +--------------- standardpool.go | 16 +--------------- 3 files changed, 20 insertions(+), 30 deletions(-) diff --git a/pool.go b/pool.go index aca4f5b..e66188b 100644 --- a/pool.go +++ b/pool.go @@ -16,6 +16,24 @@ const ( maxPercentile = 0.95 ) +// Pool represents byte buffer pool. +// +// Distinct pools may be used for distinct types of byte buffers. +// Properly determined byte buffer types with their own pools may help reducing +// memory waste. +type Pool struct { + calls [steps]uint64 + calibrating uint64 + + defaultSize uint64 + maxSize uint64 + + minBitSize uint64 + minSize uint64 + + actualPool // Conditional compilation on flag +} + var defaultPool Pool // Get returns an empty byte buffer from the pool. diff --git a/shortlivedpool.go b/shortlivedpool.go index e6241bc..5aee15a 100644 --- a/shortlivedpool.go +++ b/shortlivedpool.go @@ -8,21 +8,7 @@ import ( "github.com/gallir/shortlivedpool" ) -// Pool represents byte buffer pool. -// -// Distinct pools may be used for distinct types of byte buffers. -// Properly determined byte buffer types with their own pools may help reducing -// memory waste. -type Pool struct { - calls [steps]uint64 - calibrating uint64 - - defaultSize uint64 - maxSize uint64 - - minBitSize uint64 - minSize uint64 - +type actualPool struct { pool shortlivedpool.Pool } diff --git a/standardpool.go b/standardpool.go index f96ec92..12571a6 100644 --- a/standardpool.go +++ b/standardpool.go @@ -7,21 +7,7 @@ import ( "sync" ) -// Pool represents byte buffer pool. -// -// Distinct pools may be used for distinct types of byte buffers. -// Properly determined byte buffer types with their own pools may help reducing -// memory waste. 
-type Pool struct {
-	calls       [steps]uint64
-	calibrating uint64
-
-	defaultSize uint64
-	maxSize     uint64
-
-	minBitSize uint64
-	minSize    uint64
-
+type actualPool struct {
 	pool sync.Pool
 }

From 885a9339ad7a11202756ec746a154500c8f18f9d Mon Sep 17 00:00:00 2001
From: Ricardo Galli
Date: Mon, 27 Nov 2017 19:17:04 +0100
Subject: [PATCH 06/13] Added GetLen function

---
 bytebuffer_test.go | 41 +++++++++++++++++++++++++++++++++++++++++
 pool.go            | 35 +++++++++++++++++++++++++++++++++--
 2 files changed, 74 insertions(+), 2 deletions(-)

diff --git a/bytebuffer_test.go b/bytebuffer_test.go
index 7bb658f..d83b4dc 100644
--- a/bytebuffer_test.go
+++ b/bytebuffer_test.go
@@ -70,6 +70,10 @@ func TestByteBufferGetPutSerial(t *testing.T) {
 	testByteBufferGetPut(t)
 }
 
+func TestByteBufferGetLenPutSerial(t *testing.T) {
+	testByteBufferGetLenPut(t)
+}
+
 func TestByteBufferGetPutConcurrent(t *testing.T) {
 	concurrency := 10
 	ch := make(chan struct{}, concurrency)
@@ -89,6 +93,25 @@ func TestByteBufferGetPutConcurrent(t *testing.T) {
 	}
 }
 
+func TestByteBufferGetLenPutConcurrent(t *testing.T) {
+	concurrency := 10
+	ch := make(chan struct{}, concurrency)
+	for i := 0; i < concurrency; i++ {
+		go func() {
+			testByteBufferGetLenPut(t)
+			ch <- struct{}{}
+		}()
+	}
+
+	for i := 0; i < concurrency; i++ {
+		select {
+		case <-ch:
+		case <-time.After(time.Second):
+			t.Fatalf("timeout!")
+		}
+	}
+}
+
 func testByteBufferGetPut(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		expectedS := fmt.Sprintf("num %d", i)
@@ -102,6 +125,24 @@ func testByteBufferGetPut(t *testing.T) {
 	}
 }
 
+func testByteBufferGetLenPut(t *testing.T) {
+	bytes := []byte("test len ")
+	for i := 0; i < 10; i++ {
+		expectedS := fmt.Sprintf("%s num %d", string(bytes), i)
+		b := GetLen(len(bytes))
+		if len(b.B) != len(bytes) {
+			t.Fatalf("unexpected len: %d. Expecting %d", len(b.B), len(bytes))
+		}
+		copy(b.B, bytes)
+		b.B = append(b.B, " num "...)
+		b.B = append(b.B, fmt.Sprintf("%d", i)...)
+		if string(b.B) != expectedS {
+			t.Fatalf("unexpected result: %q. Expecting %q", b.B, expectedS)
+		}
+		Put(b)
+	}
+}
+
 func testByteBufferGetString(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		expectedS := fmt.Sprintf("num %d", i)
diff --git a/pool.go b/pool.go
index e66188b..e55fbf7 100644
--- a/pool.go
+++ b/pool.go
@@ -50,13 +50,45 @@ func Get() *ByteBuffer { return defaultPool.Get() }
 func (p *Pool) Get() *ByteBuffer {
 	v := p.pool.Get()
 	if v != nil {
-		return v.(*ByteBuffer)
+		b := v.(*ByteBuffer)
+		b.Reset()
+		return b
 	}
 	return &ByteBuffer{
 		B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)),
 	}
 }
 
+// GetLen returns a buffer with its
+// []byte slice of the exact len as specified.
+//
+// The byte buffer may be returned to the pool via Put after the use
+// in order to minimize GC overhead.
+func GetLen(s int) *ByteBuffer { return defaultPool.GetLen(s) }
+
+// GetLen returns a buffer with its
+// []byte slice of the exact len as specified.
+//
+// The byte buffer may be returned to the pool via Put after the use
+// in order to minimize GC overhead.
+func (p *Pool) GetLen(s int) *ByteBuffer {
+	v := p.pool.Get()
+	if v == nil {
+		return &ByteBuffer{
+			B: make([]byte, s),
+		}
+	}
+
+	b := v.(*ByteBuffer)
+	if cap(b.B) < s {
+		// Create a new []byte slice
+		b.B = make([]byte, s)
+	} else {
+		b.B = b.B[:s]
+	}
+	return b
+}
+
 // Put returns byte buffer to the pool.
 //
 // ByteBuffer.B mustn't be touched after returning it to the pool.
@@ -79,7 +111,6 @@ func (p *Pool) Put(b *ByteBuffer) { maxSize := int(atomic.LoadUint64(&p.maxSize)) if maxSize == 0 || cap(b.B) <= maxSize { - b.Reset() p.pool.Put(b) } } From edcb6053765d8a7c5491a528041dcb3cec55ab09 Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Wed, 21 Mar 2018 16:08:06 +0100 Subject: [PATCH 07/13] Don't log if using standard pool --- standardpool.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/standardpool.go b/standardpool.go index 12571a6..2fb5c41 100644 --- a/standardpool.go +++ b/standardpool.go @@ -3,14 +3,9 @@ package bytebufferpool import ( - "log" "sync" ) type actualPool struct { pool sync.Pool } - -func init() { - log.Println("Using standard sync.pool") -} From 8b84991abaeab250942f1f29954648ff80a332e7 Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Thu, 31 Jan 2019 22:36:15 +0100 Subject: [PATCH 08/13] Removed shortlived pool --- pool.go | 3 ++- shortlivedpool.go | 17 ----------------- standardpool.go | 11 ----------- 3 files changed, 2 insertions(+), 29 deletions(-) delete mode 100644 shortlivedpool.go delete mode 100644 standardpool.go diff --git a/pool.go b/pool.go index e55fbf7..8e95b75 100644 --- a/pool.go +++ b/pool.go @@ -2,6 +2,7 @@ package bytebufferpool import ( "sort" + "sync" "sync/atomic" ) @@ -31,7 +32,7 @@ type Pool struct { minBitSize uint64 minSize uint64 - actualPool // Conditional compilation on flag + pool sync.Pool } var defaultPool Pool diff --git a/shortlivedpool.go b/shortlivedpool.go deleted file mode 100644 index 5aee15a..0000000 --- a/shortlivedpool.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build shortlivedpool - -package bytebufferpool - -import ( - "log" - - "github.com/gallir/shortlivedpool" -) - -type actualPool struct { - pool shortlivedpool.Pool -} - -func init() { - log.Println("Using github.com/gallir/shortlivedpool instead of sync.pool") -} diff --git a/standardpool.go b/standardpool.go deleted file mode 100644 index 2fb5c41..0000000 --- a/standardpool.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !shortlivedpool - -package bytebufferpool - -import ( - "sync" -) - -type actualPool struct { - pool sync.Pool -} From fb73bd8290807d106ce092e4ef8d24d6b79bde83 Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Tue, 19 Mar 2019 16:17:01 +0100 Subject: [PATCH 09/13] Extend isntead of just replacing slice and added tests --- pool.go | 4 +++- pool_test.go | 45 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/pool.go b/pool.go index 8e95b75..c9e5e24 100644 --- a/pool.go +++ b/pool.go @@ -83,7 +83,9 @@ func (p *Pool) GetLen(s int) *ByteBuffer { b := v.(*ByteBuffer) if cap(b.B) < s { // Create a new []byte slice - b.B = make([]byte, s) + // b.B = make([]byte, s) + // Extend the slice + b.B = append(b.B[:cap(b.B)], make([]byte, s-cap(b.B))...) } else { b.B = b.B[:s] } diff --git a/pool_test.go b/pool_test.go index b357319..00a0fcd 100644 --- a/pool_test.go +++ b/pool_test.go @@ -92,3 +92,48 @@ func allocNBytes(dst []byte, n int) []byte { } return append(dst, make([]byte, diff)...) 
} + +func TestPoolGetLenVariousSizesSerial(t *testing.T) { + testPoolGetLenVariousSizesSerial(t) +} + +func testPoolGetLenVariousSizesSerial(t *testing.T) { + for i := 0; i < steps+1; i++ { + n := (1 << uint32(i)) + + testGetLenPut(t, n) + testGetLenPut(t, n+1) + testGetLenPut(t, n-1) + + for j := 0; j < 10; j++ { + testGetLenPut(t, j+n) + } + } +} + +func testGetLenPut(t *testing.T, n int) { + bb := GetLen(n) + if len(bb.B) != n { + t.Fatalf("wrong len returned by GetLen %d", n) + } + bb.B = allocNBytes(bb.B, n) + Put(bb) +} + +func TestPoolGetLenVariousSizesConcurrent(t *testing.T) { + concurrency := 5 + ch := make(chan struct{}) + for i := 0; i < concurrency; i++ { + go func() { + testPoolGetLenVariousSizesSerial(t) + ch <- struct{}{} + }() + } + for i := 0; i < concurrency; i++ { + select { + case <-ch: + case <-time.After(3 * time.Second): + t.Fatalf("timeout") + } + } +} From e492b2c5dd733e36de8f301bdb06068f88787bc5 Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Wed, 20 Mar 2019 17:18:20 +0100 Subject: [PATCH 10/13] return a size similar to the bin's --- pool.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/pool.go b/pool.go index c9e5e24..f14bf72 100644 --- a/pool.go +++ b/pool.go @@ -75,8 +75,12 @@ func GetLen(s int) *ByteBuffer { return defaultPool.GetLen(s) } func (p *Pool) GetLen(s int) *ByteBuffer { v := p.pool.Get() if v == nil { + size := int(p.minSize << uint(index(p.minBitSize, s))) + if size < s { + size = s + } return &ByteBuffer{ - B: make([]byte, s), + B: make([]byte, s, size), } } @@ -85,10 +89,14 @@ func (p *Pool) GetLen(s int) *ByteBuffer { // Create a new []byte slice // b.B = make([]byte, s) // Extend the slice - b.B = append(b.B[:cap(b.B)], make([]byte, s-cap(b.B))...) - } else { - b.B = b.B[:s] + size := int(p.minSize << uint(index(p.minBitSize, s))) + if size < s { + size = s + } + b.B = append(b.B[:cap(b.B)], make([]byte, size-cap(b.B))...) } + + b.B = b.B[:s] return b } From fe3bacba25343f6fdb1e798f65c630a55d83cc73 Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Fri, 11 Sep 2020 18:34:01 +0200 Subject: [PATCH 11/13] Return to pool and create a new slice if cap is smaller --- pool.go | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/pool.go b/pool.go index f14bf72..12915fb 100644 --- a/pool.go +++ b/pool.go @@ -85,19 +85,20 @@ func (p *Pool) GetLen(s int) *ByteBuffer { } b := v.(*ByteBuffer) - if cap(b.B) < s { - // Create a new []byte slice - // b.B = make([]byte, s) - // Extend the slice - size := int(p.minSize << uint(index(p.minBitSize, s))) - if size < s { - size = s - } - b.B = append(b.B[:cap(b.B)], make([]byte, size-cap(b.B))...) + if cap(b.B) >= s { + b.B = b.B[:s] + return b } - b.B = b.B[:s] - return b + // The size is smaller, return it to the pool and create another one + p.pool.Put(b) + size := int(p.minSize << uint(index(p.minBitSize, s))) + if size < s { + size = s + } + return &ByteBuffer{ + B: make([]byte, s, size), + } } // Put returns byte buffer to the pool. 
From 1e3ef5045898caed8e8567986e1707fa4cc93974 Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Sun, 7 Aug 2022 21:12:24 +0200 Subject: [PATCH 12/13] Fixed mod --- bytebuffer_example_test.go | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bytebuffer_example_test.go b/bytebuffer_example_test.go index 1cbaaf5..a38d7fb 100644 --- a/bytebuffer_example_test.go +++ b/bytebuffer_example_test.go @@ -3,7 +3,7 @@ package bytebufferpool_test import ( "fmt" - "github.com/valyala/bytebufferpool" + "github.com/gallir/bytebufferpool" ) func ExampleByteBuffer() { diff --git a/go.mod b/go.mod index be783c1..d498416 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,3 @@ -module github.com/valyala/bytebufferpool +module github.com/gallir/bytebufferpool go 1.12 From 6f29468cb7b924d85959c292a916f2059c3ba3bc Mon Sep 17 00:00:00 2001 From: Ricardo Galli Date: Sun, 7 Aug 2022 21:14:51 +0200 Subject: [PATCH 13/13] Added String() with no copy --- bytebuffer.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/bytebuffer.go b/bytebuffer.go index 07a055a..3154920 100644 --- a/bytebuffer.go +++ b/bytebuffer.go @@ -1,6 +1,9 @@ package bytebufferpool -import "io" +import ( + "io" + "unsafe" +) // ByteBuffer provides byte buffer, which can be used for minimizing // memory allocations. @@ -102,7 +105,7 @@ func (b *ByteBuffer) SetString(s string) { // String returns string representation of ByteBuffer.B. func (b *ByteBuffer) String() string { - return string(b.B) + return *(*string)(unsafe.Pointer(&b.B)) } // Reset makes ByteBuffer.B empty.
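
Taken together, patches 06-11 add the GetLen API and patch 13 makes String() zero-copy. The snippet below is a minimal usage sketch only, not part of the patch series: it assumes the github.com/gallir/bytebufferpool module path from the go.mod change in patch 12, and the 1024-byte length is an arbitrary example value.

package main

import (
	"fmt"

	"github.com/gallir/bytebufferpool"
)

func main() {
	// GetLen returns a buffer whose B slice already has len == 1024,
	// reusing pooled capacity when a large enough buffer is available.
	buf := bytebufferpool.GetLen(1024)

	copy(buf.B, "header")
	fmt.Println(len(buf.B)) // 1024

	// String() converts without copying (unsafe.Pointer cast in patch 13),
	// so the returned string must not be used after the buffer is put back.
	s := buf.String()
	fmt.Println(len(s))

	// Return the buffer to the pool once the data is no longer needed.
	bytebufferpool.Put(buf)
}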