From 0007fe3c684cbd83440966ead964c246870918d6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 26 Apr 2024 09:37:25 +0000 Subject: [PATCH] fix(deps): update patch digest dependencies --- go.mod | 6 +- go.sum | 6 + vendor/github.com/gammazero/deque/README.md | 12 +- vendor/github.com/gammazero/deque/deque.go | 248 +++++++++++++----- vendor/github.com/gammazero/deque/doc.go | 29 +- .../github.com/gammazero/workerpool/README.md | 8 +- vendor/github.com/gammazero/workerpool/doc.go | 50 ++-- .../gammazero/workerpool/workerpool.go | 68 +++-- vendor/github.com/pierrec/lz4/v4/README.md | 2 +- .../pierrec/lz4/v4/compressing_reader.go | 222 ++++++++++++++++ .../lz4/v4/internal/lz4block/blocks.go | 5 +- .../lz4/v4/internal/lz4block/decode_arm64.s | 15 +- .../lz4/v4/internal/lz4stream/block.go | 4 +- vendor/github.com/pierrec/lz4/v4/options.go | 28 ++ vendor/github.com/pierrec/lz4/v4/writer.go | 4 + vendor/modules.txt | 10 +- 16 files changed, 554 insertions(+), 163 deletions(-) create mode 100644 vendor/github.com/pierrec/lz4/v4/compressing_reader.go diff --git a/go.mod b/go.mod index 686073872..7153b4ad1 100644 --- a/go.mod +++ b/go.mod @@ -6,9 +6,9 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 github.com/aws/aws-sdk-go v1.34.2 - github.com/gammazero/workerpool v1.1.2 + github.com/gammazero/workerpool v1.1.3 github.com/google/uuid v1.3.0 - github.com/pierrec/lz4/v4 v4.1.17 + github.com/pierrec/lz4/v4 v4.1.21 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.6.0 github.com/slok/goresilience v0.2.0 @@ -28,7 +28,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/gammazero/deque v0.1.0 // indirect + github.com/gammazero/deque v0.2.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/golang/protobuf v1.5.2 
// indirect github.com/jmespath/go-jmespath v0.3.0 // indirect diff --git a/go.sum b/go.sum index 34801d935..04370b217 100644 --- a/go.sum +++ b/go.sum @@ -27,8 +27,12 @@ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/gammazero/deque v0.1.0 h1:f9LnNmq66VDeuAlSAapemq/U7hJ2jpIWa4c09q8Dlik= github.com/gammazero/deque v0.1.0/go.mod h1:KQw7vFau1hHuM8xmI9RbgKFbAsQFWmBpqQ2KenFLk6M= +github.com/gammazero/deque v0.2.0 h1:SkieyNB4bg2/uZZLxvya0Pq6diUlwx7m2TeT7GAIWaA= +github.com/gammazero/deque v0.2.0/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/gammazero/workerpool v1.1.2 h1:vuioDQbgrz4HoaCi2q1HLlOXdpbap5AET7xu5/qj87g= github.com/gammazero/workerpool v1.1.2/go.mod h1:UelbXcO0zCIGFcufcirHhq2/xtLXJdQ29qZNlXG9OjQ= +github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= +github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -97,6 +101,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/vendor/github.com/gammazero/deque/README.md b/vendor/github.com/gammazero/deque/README.md index 45a064701..78f99c4ca 100644 --- a/vendor/github.com/gammazero/deque/README.md +++ b/vendor/github.com/gammazero/deque/README.md @@ -30,7 +30,15 @@ For maximum speed, this deque implementation leaves concurrency safety up to the ## Reading Empty Deque -Since it is OK for the deque to contain a nil value, it is necessary to either panic or return a second boolean value to indicate the deque is empty, when reading or removing an element. This deque panics when reading from an empty deque. This is a run-time check to help catch programming errors, which may be missed if a second return value is ignored. Simply check Deque.Len() before reading from the deque. +Since it is OK for the deque to contain a `nil` value, it is necessary to either panic or return a second boolean value to indicate the deque is empty, when reading or removing an element. This deque panics when reading from an empty deque. This is a run-time check to help catch programming errors, which may be missed if a second return value is ignored. Simply check `Deque.Len()` before reading from the deque. + +## Generics + +Deque uses generics to create a Deque that contains items of the type specified. To create a Deque that holds a specific type, provide a type argument to New or with the variable declaration. 
For example: +```go + stringDeque := deque.New[string]() + var intDeque deque.Deque[int] +``` ## Example @@ -43,7 +51,7 @@ import ( ) func main() { - var q deque.Deque + var q deque.Deque[string] q.PushBack("foo") q.PushBack("bar") q.PushBack("baz") diff --git a/vendor/github.com/gammazero/deque/deque.go b/vendor/github.com/gammazero/deque/deque.go index 7baed9d97..a120ccf7e 100644 --- a/vendor/github.com/gammazero/deque/deque.go +++ b/vendor/github.com/gammazero/deque/deque.go @@ -1,12 +1,13 @@ package deque -// minCapacity is the smallest capacity that deque may have. -// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). +// minCapacity is the smallest capacity that deque may have. Must be power of 2 +// for bitwise modulus: x % n == x & (n - 1). const minCapacity = 16 -// Deque represents a single instance of the deque data structure. -type Deque struct { - buf []interface{} +// Deque represents a single instance of the deque data structure. A Deque +// instance contains items of the type sepcified by the type argument. +type Deque[T any] struct { + buf []T head int tail int count int @@ -14,18 +15,22 @@ type Deque struct { } // New creates a new Deque, optionally setting the current and minimum capacity -// when non-zero values are given for these. +// when non-zero values are given for these. The Deque instance returns +// operates on items of the type specified by the type argument. 
For example, +// to create a Deque that contains strings, // -// To create a Deque with capacity to store 2048 items without resizing, and -// that will not resize below space for 32 items when removing itmes: -// d := deque.New(2048, 32) +// stringDeque := deque.New[string]() +// +// To create a Deque with capacity to store 2048 ints without resizing, and +// that will not resize below space for 32 items when removing items: +// d := deque.New[int](2048, 32) // // To create a Deque that has not yet allocated memory, but after it does will // never resize to have space for less than 64 items: -// d := deque.New(0, 64) +// d := deque.New[int](0, 64) // -// Note that any values supplied here are rounded up to the nearest power of 2. -func New(size ...int) *Deque { +// Any size values supplied here are rounded up to the nearest power of 2. +func New[T any](size ...int) *Deque[T] { var capacity, minimum int if len(size) >= 1 { capacity = size[0] @@ -39,35 +44,42 @@ func New(size ...int) *Deque { minCap <<= 1 } - var buf []interface{} + var buf []T if capacity != 0 { bufSize := minCap for bufSize < capacity { bufSize <<= 1 } - buf = make([]interface{}, bufSize) + buf = make([]T, bufSize) } - return &Deque{ + return &Deque[T]{ buf: buf, minCap: minCap, } } -// Cap returns the current capacity of the Deque. -func (q *Deque) Cap() int { +// Cap returns the current capacity of the Deque. If q is nil, q.Cap() is zero. +func (q *Deque[T]) Cap() int { + if q == nil { + return 0 + } return len(q.buf) } -// Len returns the number of elements currently stored in the queue. -func (q *Deque) Len() int { +// Len returns the number of elements currently stored in the queue. If q is +// nil, q.Len() is zero. +func (q *Deque[T]) Len() int { + if q == nil { + return 0 + } return q.count } -// PushBack appends an element to the back of the queue. Implements FIFO when +// PushBack appends an element to the back of the queue. 
Implements FIFO when // elements are removed with PopFront(), and LIFO when elements are removed // with PopBack(). -func (q *Deque) PushBack(elem interface{}) { +func (q *Deque[T]) PushBack(elem T) { q.growIfFull() q.buf[q.tail] = elem @@ -77,7 +89,7 @@ func (q *Deque) PushBack(elem interface{}) { } // PushFront prepends an element to the front of the queue. -func (q *Deque) PushFront(elem interface{}) { +func (q *Deque[T]) PushFront(elem T) { q.growIfFull() // Calculate new head position. @@ -87,14 +99,15 @@ func (q *Deque) PushFront(elem interface{}) { } // PopFront removes and returns the element from the front of the queue. -// Implements FIFO when used with PushBack(). If the queue is empty, the call +// Implements FIFO when used with PushBack(). If the queue is empty, the call // panics. -func (q *Deque) PopFront() interface{} { +func (q *Deque[T]) PopFront() T { if q.count <= 0 { panic("deque: PopFront() called on empty queue") } ret := q.buf[q.head] - q.buf[q.head] = nil + var zero T + q.buf[q.head] = zero // Calculate new head position. q.head = q.next(q.head) q.count-- @@ -104,9 +117,9 @@ func (q *Deque) PopFront() interface{} { } // PopBack removes and returns the element from the back of the queue. -// Implements LIFO when used with PushBack(). If the queue is empty, the call +// Implements LIFO when used with PushBack(). If the queue is empty, the call // panics. -func (q *Deque) PopBack() interface{} { +func (q *Deque[T]) PopBack() T { if q.count <= 0 { panic("deque: PopBack() called on empty queue") } @@ -116,27 +129,27 @@ func (q *Deque) PopBack() interface{} { // Remove value at tail. ret := q.buf[q.tail] - q.buf[q.tail] = nil + var zero T + q.buf[q.tail] = zero q.count-- q.shrinkIfExcess() return ret } -// Front returns the element at the front of the queue. This is the element -// that would be returned by PopFront(). This call panics if the queue is +// Front returns the element at the front of the queue. 
This is the element +// that would be returned by PopFront(). This call panics if the queue is // empty. -func (q *Deque) Front() interface{} { +func (q *Deque[T]) Front() T { if q.count <= 0 { panic("deque: Front() called when empty") } return q.buf[q.head] } -// Back returns the element at the back of the queue. This is the element -// that would be returned by PopBack(). This call panics if the queue is -// empty. -func (q *Deque) Back() interface{} { +// Back returns the element at the back of the queue. This is the element that +// would be returned by PopBack(). This call panics if the queue is empty. +func (q *Deque[T]) Back() T { if q.count <= 0 { panic("deque: Back() called when empty") } @@ -144,18 +157,18 @@ func (q *Deque) Back() interface{} { } // At returns the element at index i in the queue without removing the element -// from the queue. This method accepts only non-negative index values. At(0) -// refers to the first element and is the same as Front(). At(Len()-1) refers -// to the last element and is the same as Back(). If the index is invalid, the +// from the queue. This method accepts only non-negative index values. At(0) +// refers to the first element and is the same as Front(). At(Len()-1) refers +// to the last element and is the same as Back(). If the index is invalid, the // call panics. // // The purpose of At is to allow Deque to serve as a more general purpose // circular buffer, where items are only added to and removed from the ends of -// the deque, but may be read from any place within the deque. Consider the +// the deque, but may be read from any place within the deque. Consider the // case of a fixed-size circular log buffer: A new entry is pushed onto one end -// and when full the oldest is popped from the other end. All the log entries +// and when full the oldest is popped from the other end. All the log entries // in the buffer must be readable without altering the buffer contents. 
-func (q *Deque) At(i int) interface{} { +func (q *Deque[T]) At(i int) T { if i < 0 || i >= q.count { panic("deque: At() called with index out of range") } @@ -164,9 +177,9 @@ func (q *Deque) At(i int) interface{} { } // Set puts the element at index i in the queue. Set shares the same purpose -// than At() but perform the opposite operation. The index i is the same -// index defined by At(). If the index is invalid, the call panics. -func (q *Deque) Set(i int, elem interface{}) { +// than At() but perform the opposite operation. The index i is the same index +// defined by At(). If the index is invalid, the call panics. +func (q *Deque[T]) Set(i int, elem T) { if i < 0 || i >= q.count { panic("deque: Set() called with index out of range") } @@ -176,25 +189,27 @@ func (q *Deque) Set(i int, elem interface{}) { // Clear removes all elements from the queue, but retains the current capacity. // This is useful when repeatedly reusing the queue at high frequency to avoid -// GC during reuse. The queue will not be resized smaller as long as items are -// only added. Only when items are removed is the queue subject to getting +// GC during reuse. The queue will not be resized smaller as long as items are +// only added. Only when items are removed is the queue subject to getting // resized smaller. -func (q *Deque) Clear() { +func (q *Deque[T]) Clear() { // bitwise modulus modBits := len(q.buf) - 1 + var zero T for h := q.head; h != q.tail; h = (h + 1) & modBits { - q.buf[h] = nil + q.buf[h] = zero } q.head = 0 q.tail = 0 q.count = 0 } -// Rotate rotates the deque n steps front-to-back. If n is negative, rotates -// back-to-front. Having Deque provide Rotate() avoids resizing that could -// happen if implementing rotation using only Pop and Push methods. -func (q *Deque) Rotate(n int) { - if q.count <= 1 { +// Rotate rotates the deque n steps front-to-back. If n is negative, rotates +// back-to-front. 
Having Deque provide Rotate() avoids resizing that could +// happen if implementing rotation using only Pop and Push methods. If q.Len() +// is one or less, or q is nil, then Rotate does nothing. +func (q *Deque[T]) Rotate(n int) { + if q.Len() <= 1 { return } // Rotating a multiple of q.count is same as no rotation. @@ -208,10 +223,12 @@ func (q *Deque) Rotate(n int) { if q.head == q.tail { // Calculate new head and tail using bitwise modulus. q.head = (q.head + n) & modBits - q.tail = (q.tail + n) & modBits + q.tail = q.head return } + var zero T + if n < 0 { // Rotate back to front. for ; n < 0; n++ { @@ -220,7 +237,7 @@ func (q *Deque) Rotate(n int) { q.tail = (q.tail - 1) & modBits // Put tail value at head and remove value at tail. q.buf[q.head] = q.buf[q.tail] - q.buf[q.tail] = nil + q.buf[q.tail] = zero } return } @@ -229,21 +246,116 @@ func (q *Deque) Rotate(n int) { for ; n > 0; n-- { // Put head value at tail and remove value at head. q.buf[q.tail] = q.buf[q.head] - q.buf[q.head] = nil + q.buf[q.head] = zero // Calculate new head and tail using bitwise modulus. q.head = (q.head + 1) & modBits q.tail = (q.tail + 1) & modBits } } -// SetMinCapacity sets a minimum capacity of 2^minCapacityExp. If the value of +// Index returns the index into the Deque of the first item satisfying f(item), +// or -1 if none do. If q is nil, then -1 is always returned. Search is linear +// starting with index 0. +func (q *Deque[T]) Index(f func(T) bool) int { + if q.Len() > 0 { + modBits := len(q.buf) - 1 + for i := 0; i < q.count; i++ { + if f(q.buf[(q.head+i)&modBits]) { + return i + } + } + } + return -1 +} + +// RIndex is the same as Index, but searches from Back to Front. The index +// returned is from Front to Back, where index 0 is the index of the item +// returned by Front(). 
+func (q *Deque[T]) RIndex(f func(T) bool) int { + if q.Len() > 0 { + modBits := len(q.buf) - 1 + for i := q.count - 1; i >= 0; i-- { + if f(q.buf[(q.head+i)&modBits]) { + return i + } + } + } + return -1 +} + +// Insert is used to insert an element into the middle of the queue, before the +// element at the specified index. Insert(0,e) is the same as PushFront(e) and +// Insert(Len(),e) is the same as PushBack(e). Accepts only non-negative index +// values, and panics if index is out of range. +// +// Important: Deque is optimized for O(1) operations at the ends of the queue, +// not for operations in the the middle. Complexity of this function is +// constant plus linear in the lesser of the distances between the index and +// either of the ends of the queue. +func (q *Deque[T]) Insert(at int, item T) { + if at < 0 || at > q.count { + panic("deque: Insert() called with index out of range") + } + if at*2 < q.count { + q.PushFront(item) + front := q.head + for i := 0; i < at; i++ { + next := q.next(front) + q.buf[front], q.buf[next] = q.buf[next], q.buf[front] + front = next + } + return + } + swaps := q.count - at + q.PushBack(item) + back := q.prev(q.tail) + for i := 0; i < swaps; i++ { + prev := q.prev(back) + q.buf[back], q.buf[prev] = q.buf[prev], q.buf[back] + back = prev + } +} + +// Remove removes and returns an element from the middle of the queue, at the +// specified index. Remove(0) is the same as PopFront() and Remove(Len()-1) is +// the same as PopBack(). Accepts only non-negative index values, and panics if +// index is out of range. +// +// Important: Deque is optimized for O(1) operations at the ends of the queue, +// not for operations in the the middle. Complexity of this function is +// constant plus linear in the lesser of the distances between the index and +// either of the ends of the queue. 
+func (q *Deque[T]) Remove(at int) T { + if at < 0 || at >= q.Len() { + panic("deque: Remove() called with index out of range") + } + + rm := (q.head + at) & (len(q.buf) - 1) + if at*2 < q.count { + for i := 0; i < at; i++ { + prev := q.prev(rm) + q.buf[prev], q.buf[rm] = q.buf[rm], q.buf[prev] + rm = prev + } + return q.PopFront() + } + swaps := q.count - at - 1 + for i := 0; i < swaps; i++ { + next := q.next(rm) + q.buf[rm], q.buf[next] = q.buf[next], q.buf[rm] + rm = next + } + return q.PopBack() +} + +// SetMinCapacity sets a minimum capacity of 2^minCapacityExp. If the value of // the minimum capacity is less than or equal to the minimum allowed, then -// capacity is set to the minimum allowed. This may be called at anytime to -// set a new minimum capacity. +// capacity is set to the minimum allowed. This may be called at anytime to set +// a new minimum capacity. // // Setting a larger minimum capacity may be used to prevent resizing when the // number of stored items changes frequently across a wide range. -func (q *Deque) SetMinCapacity(minCapacityExp uint) { +func (q *Deque[T]) SetMinCapacity(minCapacityExp uint) { if 1<<minCapacityExp > minCapacity { q.minCap = 1 << minCapacityExp } else { @@ -252,17 +364,17 @@ func (q *Deque) SetMinCapacity(minCapacityExp uint) { } // prev returns the previous buffer position wrapping around buffer. -func (q *Deque) prev(i int) int { +func (q *Deque[T]) prev(i int) int { return (i - 1) & (len(q.buf) - 1) // bitwise modulus } // next returns the next buffer position wrapping around buffer. -func (q *Deque) next(i int) int { +func (q *Deque[T]) next(i int) int { return (i + 1) & (len(q.buf) - 1) // bitwise modulus } // growIfFull resizes up if the buffer is full. 
-func (q *Deque) growIfFull() { +func (q *Deque[T]) growIfFull() { if q.count != len(q.buf) { return } @@ -270,24 +382,24 @@ func (q *Deque) growIfFull() { if q.minCap == 0 { q.minCap = minCapacity } - q.buf = make([]interface{}, q.minCap) + q.buf = make([]T, q.minCap) return } q.resize() } // shrinkIfExcess resize down if the buffer 1/4 full. -func (q *Deque) shrinkIfExcess() { +func (q *Deque[T]) shrinkIfExcess() { if len(q.buf) > q.minCap && (q.count<<2) == len(q.buf) { q.resize() } } -// resize resizes the deque to fit exactly twice its current contents. This is +// resize resizes the deque to fit exactly twice its current contents. This is // used to grow the queue when it is full, and also to shrink it when it is // only a quarter full. -func (q *Deque) resize() { - newBuf := make([]interface{}, q.count<<1) +func (q *Deque[T]) resize() { + newBuf := make([]T, q.count<<1) if q.tail > q.head { copy(newBuf, q.buf[q.head:q.tail]) } else { diff --git a/vendor/github.com/gammazero/deque/doc.go b/vendor/github.com/gammazero/deque/doc.go index c9647f983..3e6832b98 100644 --- a/vendor/github.com/gammazero/deque/doc.go +++ b/vendor/github.com/gammazero/deque/doc.go @@ -3,16 +3,15 @@ Package deque provides a fast ring-buffer deque (double-ended queue) implementation. Deque generalizes a queue and a stack, to efficiently add and remove items at -either end with O(1) performance. Queue (FIFO) operations are supported using -PushBack() and PopFront(). Stack (LIFO) operations are supported using +either end with O(1) performance. Queue (FIFO) operations are supported using +PushBack() and PopFront(). Stack (LIFO) operations are supported using PushBack() and PopBack(). Ring-buffer Performance -The ring-buffer automatically resizes by -powers of two, growing when additional capacity is needed and shrinking when -only a quarter of the capacity is used, and uses bitwise arithmetic for all -calculations. 
+The ring-buffer automatically resizes by powers of two, growing when additional +capacity is needed and shrinking when only a quarter of the capacity is used, +and uses bitwise arithmetic for all calculations. The ring-buffer implementation significantly improves memory and time performance with fewer GC pauses, compared to implementations based on slices @@ -23,12 +22,18 @@ the application to provide, however the application chooses, if needed at all. Reading Empty Deque -Since it is OK for the deque to contain a nil value, it is necessary to either -panic or return a second boolean value to indicate the deque is empty, when -reading or removing an element. This deque panics when reading from an empty -deque. This is a run-time check to help catch programming errors, which may be -missed if a second return value is ignored. Simply check Deque.Len() before -reading from the deque. +Since it is OK for the deque to contain the zero-value of an item, it is +necessary to either panic or return a second boolean value to indicate the +deque is empty, when reading or removing an element. This deque panics when +reading from an empty deque. This is a run-time check to help catch programming +errors, which may be missed if a second return value is ignored. Simply check +Deque.Len() before reading from the deque. + +Generics + +Deque uses generics to create a Deque that contains items of the type +specified. To create a Deque that holds a specific type, provide a type +argument to New or with the variable declaration. 
*/ package deque diff --git a/vendor/github.com/gammazero/workerpool/README.md b/vendor/github.com/gammazero/workerpool/README.md index 75c0b9c83..effcf6728 100644 --- a/vendor/github.com/gammazero/workerpool/README.md +++ b/vendor/github.com/gammazero/workerpool/README.md @@ -14,12 +14,14 @@ This implementation builds on ideas from the following: - http://nesv.github.io/golang/2014/02/25/worker-queues-in-go.html ## Installation -To install this package, you need to setup your Go workspace. The simplest way to install the library is to run: + +To install this package, you need to setup your Go workspace. The simplest way to install the library is to run: ``` $ go get github.com/gammazero/workerpool ``` ## Example + ```go package main @@ -43,6 +45,8 @@ func main() { } ``` +[Example wrapper function](https://go.dev/play/p/BWnRhJYarZ1) to show start and finish time of submitted function. + ## Usage Note -There is no upper limit on the number of tasks queued, other than the limits of system resources. If the number of inbound tasks is too many to even queue for pending processing, then the solution is outside the scope of workerpool. If should be solved by distributing load over multiple systems, and/or storing input for pending processing in intermediate storage such as a file system, distributed message queue, etc. +There is no upper limit on the number of tasks queued, other than the limits of system resources. If the number of inbound tasks is too many to even queue for pending processing, then the solution is outside the scope of workerpool. It should be solved by distributing workload over multiple systems, and/or storing input for pending processing in intermediate storage such as a file system, distributed message queue, etc. 
diff --git a/vendor/github.com/gammazero/workerpool/doc.go b/vendor/github.com/gammazero/workerpool/doc.go index c189fde82..fe4efcdf2 100644 --- a/vendor/github.com/gammazero/workerpool/doc.go +++ b/vendor/github.com/gammazero/workerpool/doc.go @@ -1,25 +1,25 @@ /* Package workerpool queues work to a limited number of goroutines. -The purpose of the worker pool is to limit the concurrency of tasks -executed by the workers. This is useful when performing tasks that require -sufficient resources (CPU, memory, etc.), and running too many tasks at the -same time would exhaust resources. +The purpose of the worker pool is to limit the concurrency of tasks executed by +the workers. This is useful when performing tasks that require sufficient +resources (CPU, memory, etc.), and running too many tasks at the same time +would exhaust resources. Non-blocking task submission -A task is a function submitted to the worker pool for execution. Submitting +A task is a function submitted to the worker pool for execution. Submitting tasks to this worker pool will not block, regardless of the number of tasks. -Incoming tasks are immediately dispatched to an available -worker. If no worker is immediately available, or there are already tasks -waiting for an available worker, then the task is put on a waiting queue to -wait for an available worker. +Incoming tasks are immediately dispatched to an available worker. If no worker +is immediately available, or there are already tasks waiting for an available +worker, then the task is put on a waiting queue to wait for an available +worker. The intent of the worker pool is to limit the concurrency of task execution, -not limit the number of tasks queued to be executed. Therefore, this unbounded -input of tasks is acceptable as the tasks cannot be discarded. 
If the number -of inbound tasks is too many to even queue for pending processing, then the -solution is outside the scope of workerpool, and should be solved by +not limit the number of tasks queued to be executed. Therefore, this unbounded +input of tasks is acceptable as the tasks cannot be discarded. If the number of +inbound tasks is too many to even queue for pending processing, then the +solution is outside the scope of workerpool. It should be solved by distributing load over multiple systems, and/or storing input for pending processing in intermediate storage such as a database, file system, distributed message queue, etc. @@ -27,31 +27,31 @@ message queue, etc. Dispatcher This worker pool uses a single dispatcher goroutine to read tasks from the -input task queue and dispatch them to worker goroutines. This allows for a +input task queue and dispatch them to worker goroutines. This allows for a small input channel, and lets the dispatcher queue as many tasks as are -submitted when there are no available workers. Additionally, the dispatcher -can adjust the number of workers as appropriate for the work load, without -having to utilize locked counters and checks incurred on task submission. +submitted when there are no available workers. Additionally, the dispatcher can +adjust the number of workers as appropriate for the work load, without having +to utilize locked counters and checks incurred on task submission. When no tasks have been submitted for a period of time, a worker is removed by -the dispatcher. This is done until there are no more workers to remove. The +the dispatcher. This is done until there are no more workers to remove. The minimum number of workers is always zero, because the time to start new workers is insignificant. Usage note It is advisable to use different worker pools for tasks that are bound by -different resources, or that have different resource use patterns. 
For -example, tasks that use X Mb of memory may need different concurrency limits -than tasks that use Y Mb of memory. +different resources, or that have different resource use patterns. For example, +tasks that use X Mb of memory may need different concurrency limits than tasks +that use Y Mb of memory. Waiting queue vs goroutines When there are no available workers to handle incoming tasks, the tasks are put -on a waiting queue, in this implementation. In implementations mentioned in -the credits below, these tasks were passed to goroutines. Using a queue is -faster and has less memory overhead than creating a separate goroutine for each -waiting task, allowing a much higher number of waiting tasks. Also, using a +on a waiting queue, in this implementation. In implementations mentioned in the +credits below, these tasks were passed to goroutines. Using a queue is faster +and has less memory overhead than creating a separate goroutine for each +waiting task, allowing a much higher number of waiting tasks. Also, using a waiting queue ensures that tasks are given to workers in the order the tasks were received. diff --git a/vendor/github.com/gammazero/workerpool/workerpool.go b/vendor/github.com/gammazero/workerpool/workerpool.go index 4ccca1276..b7ac3752c 100644 --- a/vendor/github.com/gammazero/workerpool/workerpool.go +++ b/vendor/github.com/gammazero/workerpool/workerpool.go @@ -17,7 +17,7 @@ const ( // New creates and starts a pool of worker goroutines. // // The maxWorkers parameter specifies the maximum number of workers that can -// execute tasks concurrently. When there are no incoming tasks, workers are +// execute tasks concurrently. When there are no incoming tasks, workers are // gradually stopped until there are no remaining workers. func New(maxWorkers int) *WorkerPool { // There must be at least one worker. 
@@ -27,7 +27,7 @@ func New(maxWorkers int) *WorkerPool { pool := &WorkerPool{ maxWorkers: maxWorkers, - taskQueue: make(chan func(), 1), + taskQueue: make(chan func()), workerQueue: make(chan func()), stopSignal: make(chan struct{}), stoppedChan: make(chan struct{}), @@ -47,7 +47,7 @@ type WorkerPool struct { workerQueue chan func() stoppedChan chan struct{} stopSignal chan struct{} - waitingQueue deque.Deque + waitingQueue deque.Deque[func()] stopLock sync.Mutex stopOnce sync.Once stopped bool @@ -61,8 +61,8 @@ func (p *WorkerPool) Size() int { } // Stop stops the worker pool and waits for only currently running tasks to -// complete. Pending tasks that are not currently running are abandoned. -// Tasks must not be submitted to the worker pool after calling stop. +// complete. Pending tasks that are not currently running are abandoned. Tasks +// must not be submitted to the worker pool after calling stop. // // Since creating the worker pool starts at least one goroutine, for the // dispatcher, Stop() or StopWait() should be called when the worker pool is no @@ -72,7 +72,7 @@ func (p *WorkerPool) Stop() { } // StopWait stops the worker pool and waits for all queued tasks tasks to -// complete. No additional tasks may be submitted, but all pending tasks are +// complete. No additional tasks may be submitted, but all pending tasks are // executed by workers before this function returns. func (p *WorkerPool) StopWait() { p.stop(true) @@ -88,20 +88,20 @@ func (p *WorkerPool) Stopped() bool { // Submit enqueues a function for a worker to execute. // // Any external values needed by the task function must be captured in a -// closure. Any return values should be returned over a channel that is +// closure. Any return values should be returned over a channel that is // captured in the task function closure. // -// Submit will not block regardless of the number of tasks submitted. Each -// task is immediately given to an available worker or to a newly started -// worker. 
If there are no available workers, and the maximum number of -// workers are already created, then the task is put onto a waiting queue. +// Submit will not block regardless of the number of tasks submitted. Each task +// is immediately given to an available worker or to a newly started worker. If +// there are no available workers, and the maximum number of workers are +// already created, then the task is put onto a waiting queue. // // When there are tasks on the waiting queue, any additional new tasks are put -// on the waiting queue. Tasks are removed from the waiting queue as workers +// on the waiting queue. Tasks are removed from the waiting queue as workers // become available. // // As long as no new tasks arrive, one available worker is shutdown each time -// period until there are no more idle workers. Since the time to start new +// period until there are no more idle workers. Since the time to start new // goroutines is not significant, there is no need to retain idle workers // indefinitely. func (p *WorkerPool) Submit(task func()) { @@ -129,12 +129,12 @@ func (p *WorkerPool) WaitingQueueSize() int { } // Pause causes all workers to wait on the given Context, thereby making them -// unavailable to run tasks. Pause returns when all workers are waiting. -// Tasks can continue to be queued to the workerpool, but are not executed -// until the Context is canceled or times out. +// unavailable to run tasks. Pause returns when all workers are waiting. Tasks +// can continue to be queued to the workerpool, but are not executed until the +// Context is canceled or times out. // // Calling Pause when the worker pool is already paused causes Pause to wait -// until all previous pauses are canceled. This allows a goroutine to take +// until all previous pauses are canceled. This allows a goroutine to take // control of pausing and unpausing the pool as soon as other goroutines have // unpaused it. 
// @@ -167,12 +167,13 @@ func (p *WorkerPool) dispatch() { timeout := time.NewTimer(idleTimeout) var workerCount int var idle bool + var wg sync.WaitGroup Loop: for { // As long as tasks are in the waiting queue, incoming tasks are put // into the waiting queue and tasks to run are taken from the waiting - // queue. Once the waiting queue is empty, then go back to submitting + // queue. Once the waiting queue is empty, then go back to submitting // incoming tasks directly to available workers. if p.waitingQueue.Len() != 0 { if !p.processWaitingQueue() { @@ -192,7 +193,8 @@ Loop: default: // Create a new worker, if not at max. if workerCount < p.maxWorkers { - go startWorker(task, p.workerQueue) + wg.Add(1) + go worker(task, p.workerQueue, &wg) workerCount++ } else { // Enqueue task to be executed by next available worker. @@ -202,7 +204,7 @@ Loop: } idle = false case <-timeout.C: - // Timed out waiting for work to arrive. Kill a ready worker if + // Timed out waiting for work to arrive. Kill a ready worker if // pool has been idle for a whole timeout. if idle && workerCount > 0 { if p.killIdleWorker() { @@ -224,24 +226,18 @@ Loop: p.workerQueue <- nil workerCount-- } + wg.Wait() timeout.Stop() } -// startWorker runs initial task, then starts a worker waiting for more. -func startWorker(task func(), workerQueue chan func()) { - task() - go worker(workerQueue) -} - // worker executes tasks and stops when it receives a nil task. -func worker(workerQueue chan func()) { - for task := range workerQueue { - if task == nil { - return - } +func worker(task func(), workerQueue chan func(), wg *sync.WaitGroup) { + for task != nil { task() + task = <-workerQueue } + wg.Done() } // stop tells the dispatcher to exit, and whether or not to complete queued @@ -250,11 +246,11 @@ func (p *WorkerPool) stop(wait bool) { p.stopOnce.Do(func() { // Signal that workerpool is stopping, to unpause any paused workers. 
close(p.stopSignal) - // Acquire stopLock to wait for any pause in progress to complete. All + // Acquire stopLock to wait for any pause in progress to complete. All // in-progress pauses will complete because the stopSignal unpauses the // workers. p.stopLock.Lock() - // The stopped flag prevents any additional paused workers. This makes + // The stopped flag prevents any additional paused workers. This makes // it safe to close the taskQueue. p.stopped = true p.stopLock.Unlock() @@ -275,7 +271,7 @@ func (p *WorkerPool) processWaitingQueue() bool { return false } p.waitingQueue.PushBack(task) - case p.workerQueue <- p.waitingQueue.Front().(func()): + case p.workerQueue <- p.waitingQueue.Front(): // A worker was ready, so gave task to worker. p.waitingQueue.PopFront() } @@ -289,7 +285,7 @@ func (p *WorkerPool) killIdleWorker() bool { // Sent kill signal to worker. return true default: - // No ready workers. All, if any, workers are busy. + // No ready workers. All, if any, workers are busy. return false } } @@ -299,7 +295,7 @@ func (p *WorkerPool) killIdleWorker() bool { func (p *WorkerPool) runQueuedTasks() { for p.waitingQueue.Len() != 0 { // A worker is ready, so give task to worker. - p.workerQueue <- p.waitingQueue.PopFront().(func()) + p.workerQueue <- p.waitingQueue.PopFront() atomic.StoreInt32(&p.waiting, int32(p.waitingQueue.Len())) } } diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md index 4629c9d0e..dee77545b 100644 --- a/vendor/github.com/pierrec/lz4/v4/README.md +++ b/vendor/github.com/pierrec/lz4/v4/README.md @@ -21,7 +21,7 @@ go get github.com/pierrec/lz4/v4 There is a command line interface tool to compress and decompress LZ4 files. 
``` -go install github.com/pierrec/lz4/v4/cmd/lz4c +go install github.com/pierrec/lz4/v4/cmd/lz4c@latest ``` Usage diff --git a/vendor/github.com/pierrec/lz4/v4/compressing_reader.go b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go new file mode 100644 index 000000000..8df0dc76d --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go @@ -0,0 +1,222 @@ +package lz4 + +import ( + "errors" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +type crState int + +const ( + crStateInitial crState = iota + crStateReading + crStateFlushing + crStateDone +) + +type CompressingReader struct { + state crState + src io.ReadCloser // source reader + level lz4block.CompressionLevel // how hard to try + frame *lz4stream.Frame // frame being built + in []byte + out ovWriter + handler func(int) +} + +// NewCompressingReader creates a reader which reads compressed data from +// raw stream. This makes it a logical opposite of a normal lz4.Reader. +// We require an io.ReadCloser as an underlying source for compatibility +// with Go's http.Request. +func NewCompressingReader(src io.ReadCloser) *CompressingReader { + zrd := &CompressingReader { + frame: lz4stream.NewFrame(), + } + + _ = zrd.Apply(DefaultBlockSizeOption, DefaultChecksumOption, defaultOnBlockDone) + zrd.Reset(src) + + return zrd +} + +// Source exposes the underlying source stream for introspection and control. +func (zrd *CompressingReader) Source() io.ReadCloser { + return zrd.src +} + +// Close simply invokes the underlying stream Close method. This method is +// provided for the benefit of Go http client/server, which relies on Close +// for goroutine termination. +func (zrd *CompressingReader) Close() error { + return zrd.src.Close() +} + +// Apply applies useful options to the lz4 encoder. 
+func (zrd *CompressingReader) Apply(options ...Option) (err error) { + if zrd.state != crStateInitial { + return lz4errors.ErrOptionClosedOrError + } + + zrd.Reset(zrd.src) + + for _, o := range options { + if err = o(zrd); err != nil { + return + } + } + return +} + +func (*CompressingReader) private() {} + +func (zrd *CompressingReader) init() error { + zrd.frame.InitW(&zrd.out, 1, false) + size := zrd.frame.Descriptor.Flags.BlockSizeIndex() + zrd.in = size.Get() + return zrd.frame.Descriptor.Write(zrd.frame, &zrd.out) +} + +// Read allows reading of lz4 compressed data +func (zrd *CompressingReader) Read(p []byte) (n int, err error) { + defer func() { + if err != nil { + zrd.state = crStateDone + } + }() + + if !zrd.out.reset(p) { + return len(p), nil + } + + switch zrd.state { + case crStateInitial: + err = zrd.init() + if err != nil { + return + } + zrd.state = crStateReading + case crStateDone: + return 0, errors.New("This reader is done") + case crStateFlushing: + if zrd.out.dataPos > 0 { + n = zrd.out.dataPos + zrd.out.data = nil + zrd.out.dataPos = 0 + return + } else { + zrd.state = crStateDone + return 0, io.EOF + } + } + + for zrd.state == crStateReading { + block := zrd.frame.Blocks.Block + + var rCount int + rCount, err = io.ReadFull(zrd.src, zrd.in) + switch err { + case nil: + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + + if zrd.out.dataPos == len(zrd.out.data) { + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + } + case io.EOF, io.ErrUnexpectedEOF: // read may be partial + if rCount > 0 { + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + } + + err = zrd.frame.CloseW(&zrd.out, 1) + if err != nil { + return + } + zrd.state = crStateFlushing + + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data 
= nil + return + default: + return + } + } + + err = lz4errors.ErrInternalUnhandledState + return +} + +// Reset makes the stream usable again; mostly handy to reuse lz4 encoder +// instances. +func (zrd *CompressingReader) Reset(src io.ReadCloser) { + zrd.frame.Reset(1) + zrd.state = crStateInitial + zrd.src = src + zrd.out.clear() +} + +type ovWriter struct { + data []byte + ov []byte + dataPos int + ovPos int +} + +func (wr *ovWriter) Write(p []byte) (n int, err error) { + count := copy(wr.data[wr.dataPos : ], p) + wr.dataPos += count + + if count < len(p) { + wr.ov = append(wr.ov, p[count : ]...) + } + + return len(p), nil +} + +func (wr *ovWriter) reset(out []byte) bool { + ovRem := len(wr.ov) - wr.ovPos + + if ovRem >= len(out) { + wr.ovPos += copy(out, wr.ov[wr.ovPos : ]) + return false + } + + if ovRem > 0 { + copy(out, wr.ov[wr.ovPos : ]) + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = ovRem + } else if wr.ovPos > 0 { + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = 0 + } + + wr.data = out + return true +} + +func (wr *ovWriter) clear() { + wr.data = nil + wr.dataPos = 0 + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go index a1bfa99e4..138083d94 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go @@ -8,12 +8,9 @@ const ( Block256Kb Block1Mb Block4Mb + Block8Mb = 2 * Block4Mb ) -// In legacy mode all blocks are compressed regardless -// of the compressed size: use the bound size. 
-var Block8Mb = uint32(CompressBlockBound(8 << 20)) - var ( BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s index c43e8a8d2..d2fe11b8e 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s @@ -185,7 +185,7 @@ copyMatchTry8: // A 16-at-a-time loop doesn't provide a further speedup. CMP $8, len CCMP HS, offset, $8, $0 - BLO copyMatchLoop1 + BLO copyMatchTry4 AND $7, len, lenRem SUB $8, len @@ -201,8 +201,19 @@ copyMatchLoop8: MOVD tmp2, -8(dst) B copyMatchDone +copyMatchTry4: + // Copy words if both len and offset are at least four. + CMP $4, len + CCMP HS, offset, $4, $0 + BLO copyMatchLoop1 + + MOVWU.P 4(match), tmp2 + MOVWU.P tmp2, 4(dst) + SUBS $4, len + BEQ copyMatchDone + copyMatchLoop1: - // Byte-at-a-time copy for small offsets. + // Byte-at-a-time copy for small offsets <= 3. MOVBU.P 1(match), tmp2 MOVB.P tmp2, 1(dst) SUBS $1, len diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go index 459086f09..e96465460 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -224,9 +224,7 @@ func (b *FrameDataBlock) Close(f *Frame) { func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { data := b.data if f.isLegacy() { - // In legacy mode, the buffer is sized according to CompressBlockBound, - // but only 8Mb is buffered for compression. 
- src = src[:8<<20] + data = data[:cap(data)] } else { data = data[:len(src)] // trigger the incompressible flag in CompressBlock } diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go index 46a873803..57a44e767 100644 --- a/vendor/github.com/pierrec/lz4/v4/options.go +++ b/vendor/github.com/pierrec/lz4/v4/options.go @@ -57,6 +57,13 @@ func BlockSizeOption(size BlockSize) Option { } w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) return nil + case *CompressingReader: + size := uint32(size) + if !lz4block.IsValid(size) { + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size) + } + w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -72,6 +79,9 @@ func BlockChecksumOption(flag bool) Option { case *Writer: w.frame.Descriptor.Flags.BlockChecksumSet(flag) return nil + case *CompressingReader: + w.frame.Descriptor.Flags.BlockChecksumSet(flag) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -87,6 +97,9 @@ func ChecksumOption(flag bool) Option { case *Writer: w.frame.Descriptor.Flags.ContentChecksumSet(flag) return nil + case *CompressingReader: + w.frame.Descriptor.Flags.ContentChecksumSet(flag) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -104,6 +117,10 @@ func SizeOption(size uint64) Option { w.frame.Descriptor.Flags.SizeSet(size > 0) w.frame.Descriptor.ContentSize = size return nil + case *CompressingReader: + w.frame.Descriptor.Flags.SizeSet(size > 0) + w.frame.Descriptor.ContentSize = size + return nil } return lz4errors.ErrOptionNotApplicable } @@ -162,6 +179,14 @@ func CompressionLevelOption(level CompressionLevel) Option { } w.level = lz4block.CompressionLevel(level) return nil + case *CompressingReader: + switch level { + case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9: + default: + return fmt.Errorf("%w: %d", 
lz4errors.ErrOptionInvalidCompressionLevel, level) + } + w.level = lz4block.CompressionLevel(level) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -186,6 +211,9 @@ func OnBlockDoneOption(handler func(size int)) Option { case *Reader: rw.handler = handler return nil + case *CompressingReader: + rw.handler = handler + return nil } return lz4errors.ErrOptionNotApplicable } diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go index 77699f2b5..4358adee1 100644 --- a/vendor/github.com/pierrec/lz4/v4/writer.go +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -150,6 +150,10 @@ func (w *Writer) Flush() (err error) { case writeState: case errorState: return w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } default: return nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 626232c0c..0bafb82a3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -74,11 +74,11 @@ github.com/cespare/xxhash/v2 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/gammazero/deque v0.1.0 -## explicit; go 1.15 +# github.com/gammazero/deque v0.2.0 +## explicit; go 1.18 github.com/gammazero/deque -# github.com/gammazero/workerpool v1.1.2 -## explicit; go 1.15 +# github.com/gammazero/workerpool v1.1.3 +## explicit; go 1.18 github.com/gammazero/workerpool # github.com/go-logr/logr v1.2.3 ## explicit; go 1.16 @@ -111,7 +111,7 @@ github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/moby/sys/mountinfo v0.6.2 ## explicit; go 1.16 github.com/moby/sys/mountinfo -# github.com/pierrec/lz4/v4 v4.1.17 +# github.com/pierrec/lz4/v4 v4.1.21 ## explicit; go 1.14 github.com/pierrec/lz4/v4 github.com/pierrec/lz4/v4/internal/lz4block