__".
+// If os.PathSeparator != "/", it is also escaped.
+// Additionally, the "/" in "../", the trailing "/" in "//", and a trailing
+// "/" is key names are escaped in the same way.
+// On Windows, the characters "<>:"|?*" are also escaped.
//
-// As
+// # As
//
// fileblob exposes the following types for As:
-// - Bucket: os.FileInfo
-// - Error: *os.PathError
-// - ListObject: os.FileInfo
-// - Reader: io.Reader
-// - ReaderOptions.BeforeRead: *os.File
-// - Attributes: os.FileInfo
-// - CopyOptions.BeforeCopy: *os.File
-// - WriterOptions.BeforeWrite: *os.File
-
+// - Bucket: os.FileInfo
+// - Error: *os.PathError
+// - ListObject: os.FileInfo
+// - Reader: io.Reader
+// - ReaderOptions.BeforeRead: *os.File
+// - Attributes: os.FileInfo
+// - CopyOptions.BeforeCopy: *os.File
+// - WriterOptions.BeforeWrite: *os.File
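For context, a hedged sketch of how the As mapping above is typically consumed; the helper name and key are placeholders, and blob.Bucket.Attributes / Attributes.As are assumed to have their usual gocloud.dev signatures (imports: "context", "errors", "os", "gocloud.dev/blob").

```go
// Sketch only: recover the underlying os.FileInfo for a key stored in a
// fileblob bucket, per the "Attributes: os.FileInfo" entry above.
func statViaAs(ctx context.Context, b *blob.Bucket, key string) (os.FileInfo, error) {
	attrs, err := b.Attributes(ctx, key)
	if err != nil {
		return nil, err
	}
	var fi os.FileInfo
	if !attrs.As(&fi) {
		return nil, errors.New("bucket is not backed by fileblob")
	}
	return fi, nil
}
```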
package fileblob // import "gocloud.dev/blob/fileblob"
import (
@@ -57,6 +64,7 @@ import (
"fmt"
"hash"
"io"
+ "io/fs"
"io/ioutil"
"net/url"
"os"
@@ -97,23 +105,25 @@ const Scheme = "file"
// - base_url: the base URL to use to construct signed URLs; see URLSignerHMAC
// - secret_key_path: path to read for the secret key used to construct signed URLs;
// see URLSignerHMAC
+// - metadata: if set to "skip", won't write metadata such as blob.Attributes
+// as per the package docstring
//
// If either of base_url / secret_key_path are provided, both must be.
//
-// - file:///a/directory
-// -> Passes "/a/directory" to OpenBucket.
-// - file://localhost/a/directory
-// -> Also passes "/a/directory".
-// - file://./../..
-// -> The hostname is ".", signaling a relative path; passes "../..".
-// - file:///c:/foo/bar on Windows.
-// -> Passes "c:\foo\bar".
-// - file://localhost/c:/foo/bar on Windows.
-// -> Also passes "c:\foo\bar".
-// - file:///a/directory?base_url=/show&secret_key_path=secret.key
-// -> Passes "/a/directory" to OpenBucket, and sets Options.URLSigner
-// to a URLSignerHMAC initialized with base URL "/show" and secret key
-// bytes read from the file "secret.key".
+// - file:///a/directory
+// -> Passes "/a/directory" to OpenBucket.
+// - file://localhost/a/directory
+// -> Also passes "/a/directory".
+// - file://./../..
+// -> The hostname is ".", signaling a relative path; passes "../..".
+// - file:///c:/foo/bar on Windows.
+// -> Passes "c:\foo\bar".
+// - file://localhost/c:/foo/bar on Windows.
+// -> Also passes "c:\foo\bar".
+// - file:///a/directory?base_url=/show&secret_key_path=secret.key
+// -> Passes "/a/directory" to OpenBucket, and sets Options.URLSigner
+// to a URLSignerHMAC initialized with base URL "/show" and secret key
+// bytes read from the file "secret.key".
type URLOpener struct {
// Options specifies the default options to pass to OpenBucket.
Options Options
@@ -134,15 +144,46 @@ func (o *URLOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket
return OpenBucket(filepath.FromSlash(path), opts)
}
+var recognizedParams = map[string]bool{
+ "create_dir": true,
+ "base_url": true,
+ "secret_key_path": true,
+ "metadata": true,
+}
+
+type metadataOption string // Not exported, as it is subject to change.
+
+// Settings for Options.Metadata.
+const (
+ // Metadata gets written to a separate file.
+ MetadataInSidecar metadataOption = ""
+ // Writes won't carry metadata, as per the package docstring.
+ MetadataDontWrite metadataOption = "skip"
+)
+
func (o *URLOpener) forParams(ctx context.Context, q url.Values) (*Options, error) {
for k := range q {
- if k != "create_dir" && k != "base_url" && k != "secret_key_path" {
+ if _, ok := recognizedParams[k]; !ok {
return nil, fmt.Errorf("invalid query parameter %q", k)
}
}
opts := new(Options)
*opts = o.Options
+ // Note: can't just use q.Get, because then we can't distinguish between
+ // "not set" (we should leave opts alone) vs "set to empty string" (which is
+ // one of the legal values, we should override opts).
+ metadataVal := q["metadata"]
+ if len(metadataVal) > 0 {
+ switch metadataOption(metadataVal[0]) {
+ case MetadataDontWrite:
+ opts.Metadata = MetadataDontWrite
+ case MetadataInSidecar:
+ opts.Metadata = MetadataInSidecar
+ default:
+ return nil, errors.New("fileblob.OpenBucket: unsupported value for query parameter 'metadata'")
+ }
+ }
if q.Get("create_dir") != "" {
opts.CreateDir = true
}
@@ -176,6 +217,11 @@ type Options struct {
// If true, create the directory backing the Bucket if it does not exist
// (using os.MkdirAll).
CreateDir bool
+
+	// Metadata determines the strategy for handling metadata (such as blob.Attributes).
+	// For supported values, see the Metadata* constants.
+	// If left unchanged, MetadataInSidecar is used.
+ Metadata metadataOption
}
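A hedged sketch of how the new Metadata option and the "metadata" query parameter line up; the directory path is a placeholder (imports: "context", "gocloud.dev/blob", "gocloud.dev/blob/fileblob").

```go
// Sketch: open the same directory two ways with metadata sidecar files disabled.
func openWithoutMetadata(ctx context.Context) (*blob.Bucket, *blob.Bucket, error) {
	// Constructor form.
	b1, err := fileblob.OpenBucket("/tmp/mybucket", &fileblob.Options{
		CreateDir: true,
		Metadata:  fileblob.MetadataDontWrite, // skip writing blob.Attributes et al.
	})
	if err != nil {
		return nil, nil, err
	}
	// URL form; "metadata=skip" maps to MetadataDontWrite in forParams above.
	b2, err := blob.OpenBucket(ctx, "file:///tmp/mybucket?create_dir=true&metadata=skip")
	if err != nil {
		b1.Close()
		return nil, nil, err
	}
	return b1, b2, nil
}
```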
type bucket struct {
@@ -197,7 +243,7 @@ func openBucket(dir string, opts *Options) (driver.Bucket, error) {
// Optionally, create the directory if it does not already exist.
if err != nil && opts.CreateDir && os.IsNotExist(err) {
- err = os.MkdirAll(absdir, os.ModeDir)
+ err = os.MkdirAll(absdir, os.FileMode(0777))
if err != nil {
return nil, fmt.Errorf("tried to create directory but failed: %v", err)
}
@@ -322,6 +368,7 @@ func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driv
// added. It is used to avoid adding it again; all files in this "directory"
// are collapsed to the single directory entry.
var lastPrefix string
+ var lastKeyAdded string
// If the Prefix contains a "/", we can set the root of the Walk
// to the path specified by the Prefix as any files below the path will not
@@ -335,7 +382,7 @@ func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driv
// Do a full recursive scan of the root directory.
var result driver.ListPage
- err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+ err := filepath.WalkDir(root, func(path string, info fs.DirEntry, err error) error {
if err != nil {
// Couldn't read this file/directory for some reason; just skip it.
return nil
@@ -386,18 +433,22 @@ func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driv
// For other blobs, md5 will remain nil.
md5 = xa.MD5
}
+ fi, err := info.Info()
+ if err != nil {
+ return err
+ }
asFunc := func(i interface{}) bool {
p, ok := i.(*os.FileInfo)
if !ok {
return false
}
- *p = info
+ *p = fi
return true
}
obj := &driver.ListObject{
Key: key,
- ModTime: info.ModTime(),
- Size: info.Size(),
+ ModTime: fi.ModTime(),
+ Size: fi.Size(),
MD5: md5,
AsFunc: asFunc,
}
@@ -429,16 +480,36 @@ func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driv
return nil
}
// If we've already got a full page of results, set NextPageToken and stop.
- if len(result.Objects) == pageSize {
+ // Unless the current object is a directory, in which case there may
+ // still be objects coming that are alphabetically before it (since
+ // we appended the delimiter). In that case, keep going; we'll trim the
+ // extra entries (if any) before returning.
+ if len(result.Objects) == pageSize && !obj.IsDir {
result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
return io.EOF
}
result.Objects = append(result.Objects, obj)
+ // Normally, objects are added in the correct order (by Key).
+ // However, sometimes adding the file delimiter messes that up (e.g.,
+ // if the file delimiter is later in the alphabet than the last character
+ // of a key).
+ // Detect if this happens and swap if needed.
+ if len(result.Objects) > 1 && obj.Key < lastKeyAdded {
+ i := len(result.Objects) - 1
+ result.Objects[i-1], result.Objects[i] = result.Objects[i], result.Objects[i-1]
+ lastKeyAdded = result.Objects[i].Key
+ } else {
+ lastKeyAdded = obj.Key
+ }
return nil
})
if err != nil && err != io.EOF {
return nil, err
}
+ if len(result.Objects) > pageSize {
+ result.Objects = result.Objects[0:pageSize]
+ result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
+ }
return &result, nil
}
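The ordering and page-trimming logic above exists to keep delimiter listings sorted; a hedged sketch of the caller-side view through the portable API (imports: "context", "fmt", "io", "gocloud.dev/blob").

```go
// Sketch: list top-level entries; directories appear as synthetic entries with
// IsDir set, and results arrive in key order even though the delimiter is
// appended to directory names internally.
func listTopLevel(ctx context.Context, b *blob.Bucket) error {
	iter := b.List(&blob.ListOptions{Delimiter: "/"})
	for {
		obj, err := iter.Next(ctx)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(obj.Key, obj.IsDir)
	}
}
```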
@@ -577,7 +648,7 @@ func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType str
if err != nil {
return nil, err
}
- if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
+ if err := os.MkdirAll(filepath.Dir(path), os.FileMode(0777)); err != nil {
return nil, err
}
f, err := ioutil.TempFile(filepath.Dir(path), "fileblob")
@@ -596,6 +667,16 @@ func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType str
return nil, err
}
}
+
+ if b.opts.Metadata == MetadataDontWrite {
+ w := &writer{
+ ctx: ctx,
+ File: f,
+ path: path,
+ }
+ return w, nil
+ }
+
var metadata map[string]string
if len(opts.Metadata) > 0 {
metadata = opts.Metadata
@@ -608,7 +689,7 @@ func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType str
ContentType: contentType,
Metadata: metadata,
}
- w := &writer{
+ w := &writerWithSidecar{
ctx: ctx,
f: f,
path: path,
@@ -619,7 +700,8 @@ func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType str
return w, nil
}
-type writer struct {
+// writerWithSidecar implements the strategy of storing metadata in a distinct file.
+type writerWithSidecar struct {
ctx context.Context
f *os.File
path string
@@ -630,14 +712,20 @@ type writer struct {
md5hash hash.Hash
}
-func (w *writer) Write(p []byte) (n int, err error) {
+func (w *writerWithSidecar) Write(p []byte) (n int, err error) {
+ n, err = w.f.Write(p)
+ if err != nil {
+ // Don't hash the unwritten tail twice when writing is resumed.
+ w.md5hash.Write(p[:n])
+ return n, err
+ }
if _, err := w.md5hash.Write(p); err != nil {
- return 0, err
+ return n, err
}
- return w.f.Write(p)
+ return n, nil
}
-func (w *writer) Close() error {
+func (w *writerWithSidecar) Close() error {
err := w.f.Close()
if err != nil {
return err
@@ -668,6 +756,38 @@ func (w *writer) Close() error {
return nil
}
+// writer is a file with a temporary name until closed.
+//
+// Embedding os.File allows the likes of io.Copy to use optimizations,
+// which is why it is not folded into writerWithSidecar.
+type writer struct {
+ *os.File
+ ctx context.Context
+ path string
+}
+
+func (w *writer) Close() error {
+ err := w.File.Close()
+ if err != nil {
+ return err
+ }
+ // Always delete the temp file. On success, it will have been renamed so
+ // the Remove will fail.
+ tempname := w.File.Name()
+ defer os.Remove(tempname)
+
+ // Check if the write was cancelled.
+ if err := w.ctx.Err(); err != nil {
+ return err
+ }
+
+ // Rename the temp file to path.
+ if err := os.Rename(tempname, w.path); err != nil {
+ return err
+ }
+ return nil
+}
+
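A small usage sketch of the write path: both writer types buffer into a temporary file and only rename it into place when Close succeeds, so Close is the commit point (names are placeholders; imports: "context", "io", "gocloud.dev/blob").

```go
// Sketch: nothing is visible under key until Close returns nil; for fileblob
// the rename from the temp file happens inside Close.
func putText(ctx context.Context, b *blob.Bucket, key, text string) error {
	w, err := b.NewWriter(ctx, key, nil)
	if err != nil {
		return err
	}
	_, writeErr := io.WriteString(w, text)
	closeErr := w.Close()
	if writeErr != nil {
		return writeErr
	}
	return closeErr
}
```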
// Copy implements driver.Copy.
func (b *bucket) Copy(ctx context.Context, dstKey, srcKey string, opts *driver.CopyOptions) error {
// Note: we could use NewRangeReader here, but since we need to copy all of
diff --git a/vendor/gocloud.dev/blob/memblob/memblob.go b/vendor/gocloud.dev/blob/memblob/memblob.go
index 59a9c4560..f46ddcc4a 100644
--- a/vendor/gocloud.dev/blob/memblob/memblob.go
+++ b/vendor/gocloud.dev/blob/memblob/memblob.go
@@ -15,14 +15,14 @@
// Package memblob provides an in-memory blob implementation.
// Use OpenBucket to construct a *blob.Bucket.
//
-// URLs
+// # URLs
//
// For blob.OpenBucket memblob registers for the scheme "mem".
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information.
//
-// As
+// # As
//
// memblob does not support any types for As.
package memblob // import "gocloud.dev/blob/memblob"
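A minimal memblob sketch, e.g. for tests; the key and contents are arbitrary (imports: "context", "gocloud.dev/blob/memblob").

```go
// Sketch: round-trip a value through an in-memory bucket.
func roundTrip(ctx context.Context) ([]byte, error) {
	b := memblob.OpenBucket(nil)
	defer b.Close()
	if err := b.WriteAll(ctx, "greeting", []byte("hello"), nil); err != nil {
		return nil, err
	}
	return b.ReadAll(ctx, "greeting")
}
```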
diff --git a/vendor/gocloud.dev/gcerrors/errors.go b/vendor/gocloud.dev/gcerrors/errors.go
index ca283652f..cf84247d5 100644
--- a/vendor/gocloud.dev/gcerrors/errors.go
+++ b/vendor/gocloud.dev/gcerrors/errors.go
@@ -28,43 +28,43 @@ import (
type ErrorCode = gcerr.ErrorCode
const (
- // Returned by the Code function on a nil error. It is not a valid
+ // OK is returned by the Code function on a nil error. It is not a valid
// code for an error.
OK ErrorCode = gcerr.OK
- // The error could not be categorized.
+ // Unknown means that the error could not be categorized.
Unknown ErrorCode = gcerr.Unknown
- // The resource was not found.
+ // NotFound means that the resource was not found.
NotFound ErrorCode = gcerr.NotFound
- // The resource exists, but it should not.
+ // AlreadyExists means that the resource exists, but it should not.
AlreadyExists ErrorCode = gcerr.AlreadyExists
- // A value given to a Go CDK API is incorrect.
+	// InvalidArgument means that a value given to a Go CDK API is incorrect.
InvalidArgument ErrorCode = gcerr.InvalidArgument
- // Something unexpected happened. Internal errors always indicate
+ // Internal means that something unexpected happened. Internal errors always indicate
// bugs in the Go CDK (or possibly the underlying service).
Internal ErrorCode = gcerr.Internal
- // The feature is not implemented.
+ // Unimplemented means that the feature is not implemented.
Unimplemented ErrorCode = gcerr.Unimplemented
- // The system was in the wrong state.
+ // FailedPrecondition means that the system was in the wrong state.
FailedPrecondition ErrorCode = gcerr.FailedPrecondition
- // The caller does not have permission to execute the specified operation.
+ // PermissionDenied means that the caller does not have permission to execute the specified operation.
PermissionDenied ErrorCode = gcerr.PermissionDenied
- // Some resource has been exhausted, typically because a service resource limit
+ // ResourceExhausted means that some resource has been exhausted, typically because a service resource limit
// has been reached.
ResourceExhausted ErrorCode = gcerr.ResourceExhausted
- // The operation was canceled.
+ // Canceled means that the operation was canceled.
Canceled ErrorCode = gcerr.Canceled
- // The operation timed out.
+	// DeadlineExceeded means that the operation timed out.
DeadlineExceeded ErrorCode = gcerr.DeadlineExceeded
)
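The reworded comments above document the portable error codes; a typical check against gcerrors.Code, shown with a placeholder bucket read (imports: "context", "gocloud.dev/blob", "gocloud.dev/gcerrors").

```go
// Sketch: treat "not found" as a non-fatal condition rather than an error.
func readIfPresent(ctx context.Context, b *blob.Bucket, key string) ([]byte, bool, error) {
	data, err := b.ReadAll(ctx, key)
	if gcerrors.Code(err) == gcerrors.NotFound {
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return data, true, nil
}
```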
diff --git a/vendor/gocloud.dev/internal/gcerr/gcerr.go b/vendor/gocloud.dev/internal/gcerr/gcerr.go
index b8c82a53f..f28f0e499 100644
--- a/vendor/gocloud.dev/internal/gcerr/gcerr.go
+++ b/vendor/gocloud.dev/internal/gcerr/gcerr.go
@@ -31,43 +31,43 @@ import (
type ErrorCode int
const (
- // Returned by the Code function on a nil error. It is not a valid
+ // OK is returned by the Code function on a nil error. It is not a valid
// code for an error.
OK ErrorCode = 0
- // The error could not be categorized.
+ // Unknown means that the error could not be categorized.
Unknown ErrorCode = 1
- // The resource was not found.
+ // NotFound means that the resource was not found.
NotFound ErrorCode = 2
- // The resource exists, but it should not.
+ // AlreadyExists means that the resource exists, but it should not.
AlreadyExists ErrorCode = 3
- // A value given to a Go CDK API is incorrect.
+	// InvalidArgument means that a value given to a Go CDK API is incorrect.
InvalidArgument ErrorCode = 4
- // Something unexpected happened. Internal errors always indicate
+ // Internal means that something unexpected happened. Internal errors always indicate
// bugs in the Go CDK (or possibly the underlying service).
Internal ErrorCode = 5
- // The feature is not implemented.
+ // Unimplemented means that the feature is not implemented.
Unimplemented ErrorCode = 6
- // The system was in the wrong state.
+ // FailedPrecondition means that the system was in the wrong state.
FailedPrecondition ErrorCode = 7
- // The caller does not have permission to execute the specified operation.
+ // PermissionDenied means that the caller does not have permission to execute the specified operation.
PermissionDenied ErrorCode = 8
- // Some resource has been exhausted, typically because a service resource limit
+ // ResourceExhausted means that some resource has been exhausted, typically because a service resource limit
// has been reached.
ResourceExhausted ErrorCode = 9
- // The operation was canceled.
+ // Canceled means that the operation was canceled.
Canceled ErrorCode = 10
- // The operation timed out.
+	// DeadlineExceeded means that the operation timed out.
DeadlineExceeded ErrorCode = 11
)
@@ -84,20 +84,24 @@ const (
// An Error describes a Go CDK error.
type Error struct {
+ // Code is the error code.
Code ErrorCode
msg string
frame xerrors.Frame
err error
}
+// Error returns the error as a string.
func (e *Error) Error() string {
return fmt.Sprint(e)
}
+// Format formats the error.
func (e *Error) Format(s fmt.State, c rune) {
xerrors.FormatError(e, s, c)
}
+// FormatError formats the error using an xerrors.Printer.
func (e *Error) FormatError(p xerrors.Printer) (next error) {
if e.msg == "" {
p.Printf("code=%v", e.Code)
diff --git a/vendor/gocloud.dev/internal/retry/retry.go b/vendor/gocloud.dev/internal/retry/retry.go
index 05b064045..cc8aebf58 100644
--- a/vendor/gocloud.dev/internal/retry/retry.go
+++ b/vendor/gocloud.dev/internal/retry/retry.go
@@ -73,6 +73,7 @@ func (e *ContextError) Error() string {
return fmt.Sprintf("%v; last error: %v", e.CtxErr, e.FuncErr)
}
+// Is returns true iff one of the two errors held in e is equal to target.
func (e *ContextError) Is(target error) bool {
return e.CtxErr == target || e.FuncErr == target
}
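Because of this Is method, errors.Is matches a *ContextError when either the context error or the last function error equals the target; a hedged sketch (imports: "context", "errors").

```go
// Sketch: classify an error returned by a retried operation without caring
// whether cancellation surfaced via CtxErr or via FuncErr.
func isContextDone(err error) bool {
	return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
}
```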
diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go
new file mode 100644
index 000000000..fbf1934a0
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/cmp.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// min is a version of the predeclared function from the Go 1.21 release.
+func min[T constraints.Ordered](a, b T) T {
+ if a < b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// max is a version of the predeclared function from the Go 1.21 release.
+func max[T constraints.Ordered](a, b T) T {
+ if a > b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// cmpLess is a copy of cmp.Less from the Go 1.21 release.
+func cmpLess[T constraints.Ordered](x, y T) bool {
+ return (isNaN(x) && !isNaN(y)) || x < y
+}
+
+// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
+func cmpCompare[T constraints.Ordered](x, y T) int {
+ xNaN := isNaN(x)
+ yNaN := isNaN(y)
+ if xNaN && yNaN {
+ return 0
+ }
+ if xNaN || x < y {
+ return -1
+ }
+ if yNaN || x > y {
+ return +1
+ }
+ return 0
+}
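These helpers pin down the package's NaN convention (NaNs order before everything else); a tiny sketch of the observable effect via the exported Sort (imports: "math", "golang.org/x/exp/slices").

```go
// Sketch: with cmpLess above, NaNs end up at the front after sorting.
func sortWithNaNs() []float64 {
	xs := []float64{3, math.NaN(), 1}
	slices.Sort(xs) // NaN, 1, 3 — NaNs ordered before other values
	return xs
}
```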
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
index a17b3cf69..5e8158bba 100644
--- a/vendor/golang.org/x/exp/slices/slices.go
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -3,23 +3,20 @@
// license that can be found in the LICENSE file.
// Package slices defines various functions useful with slices of any type.
-// Unless otherwise specified, these functions all apply to the elements
-// of a slice at index 0 <= i < len(s).
-//
-// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
-// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
-// or the sorting may fail to sort correctly. A common case is when sorting slices of
-// floating-point numbers containing NaN values.
package slices
-import "golang.org/x/exp/constraints"
+import (
+ "unsafe"
+
+ "golang.org/x/exp/constraints"
+)
// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
-func Equal[E comparable](s1, s2 []E) bool {
+func Equal[S ~[]E, E comparable](s1, s2 S) bool {
if len(s1) != len(s2) {
return false
}
@@ -31,12 +28,12 @@ func Equal[E comparable](s1, s2 []E) bool {
return true
}
-// EqualFunc reports whether two slices are equal using a comparison
+// EqualFunc reports whether two slices are equal using an equality
// function on each pair of elements. If the lengths are different,
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
-func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
+func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
if len(s1) != len(s2) {
return false
}
@@ -49,45 +46,37 @@ func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
return true
}
-// Compare compares the elements of s1 and s2.
-// The elements are compared sequentially, starting at index 0,
+// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
+// of elements. The elements are compared sequentially, starting at index 0,
// until one element is not equal to the other.
// The result of comparing the first non-matching elements is returned.
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
-// Comparisons involving floating point NaNs are ignored.
-func Compare[E constraints.Ordered](s1, s2 []E) int {
- s2len := len(s2)
+func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
for i, v1 := range s1 {
- if i >= s2len {
+ if i >= len(s2) {
return +1
}
v2 := s2[i]
- switch {
- case v1 < v2:
- return -1
- case v1 > v2:
- return +1
+ if c := cmpCompare(v1, v2); c != 0 {
+ return c
}
}
- if len(s1) < s2len {
+ if len(s1) < len(s2) {
return -1
}
return 0
}
-// CompareFunc is like Compare but uses a comparison function
-// on each pair of elements. The elements are compared in increasing
-// index order, and the comparisons stop after the first time cmp
-// returns non-zero.
+// CompareFunc is like [Compare] but uses a custom comparison function on each
+// pair of elements.
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
-func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
- s2len := len(s2)
+func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
for i, v1 := range s1 {
- if i >= s2len {
+ if i >= len(s2) {
return +1
}
v2 := s2[i]
@@ -95,7 +84,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
return c
}
}
- if len(s1) < s2len {
+ if len(s1) < len(s2) {
return -1
}
return 0
@@ -103,9 +92,9 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
-func Index[E comparable](s []E, v E) int {
- for i, vs := range s {
- if v == vs {
+func Index[S ~[]E, E comparable](s S, v E) int {
+ for i := range s {
+ if v == s[i] {
return i
}
}
@@ -114,9 +103,9 @@ func Index[E comparable](s []E, v E) int {
// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
-func IndexFunc[E any](s []E, f func(E) bool) int {
- for i, v := range s {
- if f(v) {
+func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
+ for i := range s {
+ if f(s[i]) {
return i
}
}
@@ -124,33 +113,104 @@ func IndexFunc[E any](s []E, f func(E) bool) int {
}
// Contains reports whether v is present in s.
-func Contains[E comparable](s []E, v E) bool {
+func Contains[S ~[]E, E comparable](s S, v E) bool {
return Index(s, v) >= 0
}
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
+
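The signatures above now take ~[]E, so defined slice types work without conversion; a brief sketch (import: "golang.org/x/exp/slices").

```go
// Sketch: the ~[]E constraint accepts named slice types directly.
type IDs []int

func hasEvenID(ids IDs) bool {
	return slices.ContainsFunc(ids, func(v int) bool { return v%2 == 0 })
}
```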
// Insert inserts the values v... into s at index i,
// returning the modified slice.
-// In the returned slice r, r[i] == v[0].
+// The elements at s[i:] are shifted up to make room.
+// In the returned slice r, r[i] == v[0],
+// and r[i+len(v)] == value originally at r[i].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
- tot := len(s) + len(v)
- if tot <= cap(s) {
- s2 := s[:tot]
- copy(s2[i+len(v):], s[i:])
+ m := len(v)
+ if m == 0 {
+ return s
+ }
+ n := len(s)
+ if i == n {
+ return append(s, v...)
+ }
+ if n+m > cap(s) {
+ // Use append rather than make so that we bump the size of
+ // the slice up to the next storage class.
+ // This is what Grow does but we don't call Grow because
+ // that might copy the values twice.
+ s2 := append(s[:i], make(S, n+m-i)...)
copy(s2[i:], v)
+ copy(s2[i+m:], s[i:])
return s2
}
- s2 := make(S, tot)
- copy(s2, s[:i])
- copy(s2[i:], v)
- copy(s2[i+len(v):], s[i:])
- return s2
+ s = s[:n+m]
+
+ // before:
+ // s: aaaaaaaabbbbccccccccdddd
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // after:
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ //
+ // a are the values that don't move in s.
+ // v are the values copied in from v.
+ // b and c are the values from s that are shifted up in index.
+ // d are the values that get overwritten, never to be seen again.
+
+ if !overlaps(v, s[i+m:]) {
+ // Easy case - v does not overlap either the c or d regions.
+ // (It might be in some of a or b, or elsewhere entirely.)
+ // The data we copy up doesn't write to v at all, so just do it.
+
+ copy(s[i+m:], s[i:])
+
+ // Now we have
+ // s: aaaaaaaabbbbbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // Note the b values are duplicated.
+
+ copy(s[i:], v)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+ }
+
+ // The hard case - v overlaps c or d. We can't just shift up
+ // the data because we'd move or clobber the values we're trying
+ // to insert.
+ // So instead, write v on top of d, then rotate.
+ copy(s[n:], v)
+
+ // Now we have
+ // s: aaaaaaaabbbbccccccccvvvv
+ // ^ ^ ^ ^
+ // i i+m n n+m
+
+ rotateRight(s[i:], m)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
}
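A short usage sketch of the documented postconditions (r[i] == v[0], and the old r[i] shifted up by len(v)):

```go
// Sketch: insert two values at index 1.
func insertDemo() []int {
	s := []int{10, 20, 30}
	// Afterwards s[1] == 11 and the element previously at index 1 (20) sits at index 3.
	return slices.Insert(s, 1, 11, 12) // [10 11 12 20 30]
}
```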
// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if s[i:j] is not a valid slice of s.
-// Delete modifies the contents of the slice s; it does not create a new slice.
// Delete is O(len(s)-j), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
@@ -162,6 +222,115 @@ func Delete[S ~[]E, E any](s S, i, j int) S {
return append(s[:i], s[j:]...)
}
+// DeleteFunc removes any elements from s for which del returns true,
+// returning the modified slice.
+// When DeleteFunc removes m elements, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage
+// collected.
+func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
+ i := IndexFunc(s, del)
+ if i == -1 {
+ return s
+ }
+ // Don't start copying elements until we find one to delete.
+ for j := i + 1; j < len(s); j++ {
+ if v := s[j]; !del(v) {
+ s[i] = v
+ i++
+ }
+ }
+ return s[:i]
+}
+
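A brief DeleteFunc sketch; as with Delete, the returned slice must be kept:

```go
// Sketch: drop empty strings in place.
func dropEmpty(words []string) []string {
	return slices.DeleteFunc(words, func(s string) bool { return s == "" })
}
```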
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+ _ = s[i:j] // verify that i:j is a valid subslice
+
+ if i == j {
+ return Insert(s, i, v...)
+ }
+ if j == len(s) {
+ return append(s[:i], v...)
+ }
+
+ tot := len(s[:i]) + len(v) + len(s[j:])
+ if tot > cap(s) {
+ // Too big to fit, allocate and copy over.
+ s2 := append(s[:i], make(S, tot-i)...) // See Insert
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[j:])
+ return s2
+ }
+
+ r := s[:tot]
+
+ if i+len(v) <= j {
+ // Easy, as v fits in the deleted portion.
+ copy(r[i:], v)
+ if i+len(v) != j {
+ copy(r[i+len(v):], s[j:])
+ }
+ return r
+ }
+
+ // We are expanding (v is bigger than j-i).
+ // The situation is something like this:
+ // (example has i=4,j=8,len(s)=16,len(v)=6)
+ // s: aaaaxxxxbbbbbbbbyy
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ // a: prefix of s
+ // x: deleted range
+ // b: more of s
+ // y: area to expand into
+
+ if !overlaps(r[i+len(v):], v) {
+ // Easy, as v is not clobbered by the first copy.
+ copy(r[i+len(v):], s[j:])
+ copy(r[i:], v)
+ return r
+ }
+
+ // This is a situation where we don't have a single place to which
+ // we can copy v. Parts of it need to go to two different places.
+ // We want to copy the prefix of v into y and the suffix into x, then
+ // rotate |y| spots to the right.
+ //
+ // v[2:] v[:2]
+ // | |
+ // s: aaaavvvvbbbbbbbbvv
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ //
+ // If either of those two destinations don't alias v, then we're good.
+ y := len(v) - (j - i) // length of y portion
+
+ if !overlaps(r[i:j], v) {
+ copy(r[i:j], v[y:])
+ copy(r[len(s):], v[:y])
+ rotateRight(r[i:], y)
+ return r
+ }
+ if !overlaps(r[len(s):], v) {
+ copy(r[len(s):], v[:y])
+ copy(r[i:j], v[y:])
+ rotateRight(r[i:], y)
+ return r
+ }
+
+ // Now we know that v overlaps both x and y.
+ // That means that the entirety of b is *inside* v.
+ // So we don't need to preserve b at all; instead we
+ // can copy v first, then copy the b part of v out of
+ // v to the right destination.
+ k := startIdx(v, s[j:])
+ copy(r[i:], v)
+ copy(r[i+len(v):], r[i+k:])
+ return r
+}
+
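A small Replace sketch; the replacement may be longer or shorter than the range it replaces:

```go
// Sketch: replace s[1:3] with three values, growing the slice by one.
func replaceDemo() []int {
	s := []int{1, 2, 3, 4, 5}
	return slices.Replace(s, 1, 3, 20, 30, 40) // [1 20 30 40 4 5]
}
```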
// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
func Clone[S ~[]E, E any](s S) S {
@@ -174,35 +343,40 @@ func Clone[S ~[]E, E any](s S) S {
// Compact replaces consecutive runs of equal elements with a single copy.
// This is like the uniq command found on Unix.
-// Compact modifies the contents of the slice s; it does not create a new slice.
+// Compact modifies the contents of the slice s and returns the modified slice,
+// which may have a smaller length.
+// When Compact discards m elements in total, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage collected.
func Compact[S ~[]E, E comparable](s S) S {
- if len(s) == 0 {
+ if len(s) < 2 {
return s
}
i := 1
- last := s[0]
- for _, v := range s[1:] {
- if v != last {
- s[i] = v
+ for k := 1; k < len(s); k++ {
+ if s[k] != s[k-1] {
+ if i != k {
+ s[i] = s[k]
+ }
i++
- last = v
}
}
return s[:i]
}
-// CompactFunc is like Compact but uses a comparison function.
+// CompactFunc is like [Compact] but uses an equality function to compare elements.
+// For runs of elements that compare equal, CompactFunc keeps the first one.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
- if len(s) == 0 {
+ if len(s) < 2 {
return s
}
i := 1
- last := s[0]
- for _, v := range s[1:] {
- if !eq(v, last) {
- s[i] = v
+ for k := 1; k < len(s); k++ {
+ if !eq(s[k], s[k-1]) {
+ if i != k {
+ s[i] = s[k]
+ }
i++
- last = v
}
}
return s[:i]
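A quick Compact sketch illustrating the in-place contract documented above:

```go
// Sketch: Compact keeps the first element of each run of equal values.
func compactDemo() []int {
	s := []int{1, 1, 2, 2, 2, 3}
	return slices.Compact(s) // [1 2 3]; the returned (shorter) slice must be kept
}
```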
@@ -210,14 +384,116 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
// Grow increases the slice's capacity, if necessary, to guarantee space for
// another n elements. After Grow(n), at least n elements can be appended
-// to the slice without another allocation. Grow may modify elements of the
-// slice between the length and the capacity. If n is negative or too large to
+// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
func Grow[S ~[]E, E any](s S, n int) S {
- return append(s, make(S, n)...)[:len(s)]
+ if n < 0 {
+ panic("cannot be negative")
+ }
+ if n -= cap(s) - len(s); n > 0 {
+ // TODO(https://go.dev/issue/53888): Make using []E instead of S
+ // to workaround a compiler bug where the runtime.growslice optimization
+ // does not take effect. Revert when the compiler is fixed.
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+ }
+ return s
}
// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
func Clip[S ~[]E, E any](s S) S {
return s[:len(s):len(s)]
}
+
+// Rotation algorithm explanation:
+//
+// rotate left by 2
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join first parts
+// 89234567 01
+// recursively rotate first left part by 2
+// 23456789 01
+// join at the end
+// 2345678901
+//
+// rotate left by 8
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join last parts
+// 89 23456701
+// recursively rotate second part left by 6
+// 89 01234567
+// join at the end
+// 8901234567
+
+// TODO: There are other rotate algorithms.
+// This algorithm has the desirable property that it moves each element exactly twice.
+// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
+// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
+
+// rotateLeft rotates b left by n spaces.
+// s_final[i] = s_orig[i+r], wrapping around.
+func rotateLeft[E any](s []E, r int) {
+ for r != 0 && r != len(s) {
+ if r*2 <= len(s) {
+ swap(s[:r], s[len(s)-r:])
+ s = s[:len(s)-r]
+ } else {
+ swap(s[:len(s)-r], s[r:])
+ s, r = s[len(s)-r:], r*2-len(s)
+ }
+ }
+}
+func rotateRight[E any](s []E, r int) {
+ rotateLeft(s, len(s)-r)
+}
+
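For intuition only, a hedged sketch of the triple-reverse rotation mentioned in the TODO above; it yields the same result as rotateLeft but with more writes, and it is not what this package uses:

```go
// rotateLeftByReversal rotates s left by r positions using three reversals.
// Illustrative only; the swap-based rotateLeft above moves each element exactly twice.
func rotateLeftByReversal[E any](s []E, r int) {
	Reverse(s[:r]) // reverse the prefix that will wrap around to the end
	Reverse(s[r:]) // reverse the remainder
	Reverse(s)     // reverse the whole slice to land everything in place
}
```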
+// swap swaps the contents of x and y. x and y must be equal length and disjoint.
+func swap[E any](x, y []E) {
+ for i := 0; i < len(x); i++ {
+ x[i], y[i] = y[i], x[i]
+ }
+}
+
+// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
+func overlaps[E any](a, b []E) bool {
+ if len(a) == 0 || len(b) == 0 {
+ return false
+ }
+ elemSize := unsafe.Sizeof(a[0])
+ if elemSize == 0 {
+ return false
+ }
+ // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
+ // Also see crypto/internal/alias/alias.go:AnyOverlap
+ return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
+ uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
+}
+
+// startIdx returns the index in haystack where the needle starts.
+// prerequisite: the needle must be aliased entirely inside the haystack.
+func startIdx[E any](haystack, needle []E) int {
+ p := &needle[0]
+ for i := range haystack {
+ if p == &haystack[i] {
+ return i
+ }
+ }
+ // TODO: what if the overlap is by a non-integral number of Es?
+ panic("needle not found")
+}
+
+// Reverse reverses the elements of the slice in place.
+func Reverse[S ~[]E, E any](s S) {
+ for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+ s[i], s[j] = s[j], s[i]
+ }
+}
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
index c22e74bd1..b67897f76 100644
--- a/vendor/golang.org/x/exp/slices/sort.go
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
+
package slices
import (
@@ -11,97 +13,157 @@ import (
)
// Sort sorts a slice of any ordered type in ascending order.
-// Sort may fail to sort correctly when sorting slices of floating-point
-// numbers containing Not-a-number (NaN) values.
-// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
-// instead if the input may contain NaNs.
-func Sort[E constraints.Ordered](x []E) {
+// When sorting floating-point numbers, NaNs are ordered before other values.
+func Sort[S ~[]E, E constraints.Ordered](x S) {
n := len(x)
pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
}
-// SortFunc sorts the slice x in ascending order as determined by the less function.
-// This sort is not guaranteed to be stable.
+// SortFunc sorts the slice x in ascending order as determined by the cmp
+// function. This sort is not guaranteed to be stable.
+// cmp(a, b) should return a negative number when a < b, a positive number when
+// a > b and zero when a == b.
//
-// SortFunc requires that less is a strict weak ordering.
+// SortFunc requires that cmp is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
-func SortFunc[E any](x []E, less func(a, b E) bool) {
+func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
n := len(x)
- pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
+ pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
}
-// SortStable sorts the slice x while keeping the original order of equal
-// elements, using less to compare elements.
-func SortStableFunc[E any](x []E, less func(a, b E) bool) {
- stableLessFunc(x, len(x), less)
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using cmp to compare elements in the same way as [SortFunc].
+func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ stableCmpFunc(x, len(x), cmp)
}
// IsSorted reports whether x is sorted in ascending order.
-func IsSorted[E constraints.Ordered](x []E) bool {
+func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
for i := len(x) - 1; i > 0; i-- {
- if x[i] < x[i-1] {
+ if cmpLess(x[i], x[i-1]) {
return false
}
}
return true
}
-// IsSortedFunc reports whether x is sorted in ascending order, with less as the
-// comparison function.
-func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
+// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
+// comparison function as defined by [SortFunc].
+func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
for i := len(x) - 1; i > 0; i-- {
- if less(x[i], x[i-1]) {
+ if cmp(x[i], x[i-1]) < 0 {
return false
}
}
return true
}
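The less functions above have become cmp functions returning an int; a brief sketch of the new contract (import: "golang.org/x/exp/slices").

```go
// Sketch: sort by length, then verify with the same comparator.
func sortByLen(words []string) bool {
	byLen := func(a, b string) int { return len(a) - len(b) } // <0 / 0 / >0 per the cmp contract
	slices.SortFunc(words, byLen)
	return slices.IsSortedFunc(words, byLen)
}
```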
+// Min returns the minimal value in x. It panics if x is empty.
+// For floating-point numbers, Min propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Min[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Min: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = min(m, x[i])
+ }
+ return m
+}
+
+// MinFunc returns the minimal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one minimal element
+// according to the cmp function, MinFunc returns the first one.
+func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MinFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) < 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// Max returns the maximal value in x. It panics if x is empty.
+// For floating-point E, Max propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Max[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Max: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = max(m, x[i])
+ }
+ return m
+}
+
+// MaxFunc returns the maximal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one maximal element
+// according to the cmp function, MaxFunc returns the first one.
+func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MaxFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) > 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
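A short sketch of the new Min/Max helpers; both panic on an empty slice, as documented above:

```go
// Sketch: Min on ordered elements, MaxFunc with a custom comparison.
func minMaxDemo() (int, string) {
	lo := slices.Min([]int{3, 1, 2}) // 1
	longest := slices.MaxFunc([]string{"go", "cloud", "dev"},
		func(a, b string) int { return len(a) - len(b) }) // "cloud"
	return lo, longest
}
```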
// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
-func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
- // search returns the leftmost position where f returns true, or len(x) if f
- // returns false for all x. This is the insertion position for target in x,
- // and could point to an element that's either == target or not.
- pos := search(len(x), func(i int) bool { return x[i] >= target })
- if pos >= len(x) || x[pos] != target {
- return pos, false
- } else {
- return pos, true
- }
-}
-
-// BinarySearchFunc works like BinarySearch, but uses a custom comparison
-// function. The slice must be sorted in increasing order, where "increasing" is
-// defined by cmp. cmp(a, b) is expected to return an integer comparing the two
-// parameters: 0 if a == b, a negative number if a < b and a positive number if
-// a > b.
-func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) {
- pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 })
- if pos >= len(x) || cmp(x[pos], target) != 0 {
- return pos, false
- } else {
- return pos, true
+func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmpLess(x[h], target) {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
}
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
}
-func search(n int, f func(int) bool) int {
- // Define f(-1) == false and f(n) == true.
- // Invariant: f(i-1) == false, f(j) == true.
+// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing"
+// is defined by cmp. cmp should return 0 if the slice element matches
+// the target, a negative number if the slice element precedes the target,
+// or a positive number if the slice element follows the target.
+// cmp must implement the same ordering as the slice, such that if
+// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
- if !f(h) {
- i = h + 1 // preserves f(i-1) == false
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
} else {
- j = h // preserves f(j) == true
+ j = h // preserves cmp(x[j], target) >= 0
}
}
- // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
- return i
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
}
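The new BinarySearchFunc signature lets the target type differ from the element type; a hedged sketch with a placeholder user type (imports: "strings", "golang.org/x/exp/slices").

```go
// Sketch: search a slice sorted by name for a plain string target.
type user struct{ name string }

func findUser(users []user, name string) (int, bool) {
	return slices.BinarySearchFunc(users, name, func(u user, target string) int {
		return strings.Compare(u.name, target)
	})
}
```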
type sortedHint int // hint for pdqsort when choosing the pivot
@@ -125,3 +187,9 @@ func (r *xorshift) Next() uint64 {
func nextPowerOfTwo(length int) uint {
return 1 << bits.Len(uint(length))
}
+
+// isNaN reports whether x is a NaN without requiring the math package.
+// This will always return false if T is not floating-point.
+func isNaN[T constraints.Ordered](x T) bool {
+ return x != x
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
similarity index 64%
rename from vendor/golang.org/x/exp/slices/zsortfunc.go
rename to vendor/golang.org/x/exp/slices/zsortanyfunc.go
index 2a632476c..06f2c7a24 100644
--- a/vendor/golang.org/x/exp/slices/zsortfunc.go
+++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
@@ -6,28 +6,28 @@
package slices
-// insertionSortLessFunc sorts data[a:b] using insertion sort.
-func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+// insertionSortCmpFunc sorts data[a:b] using insertion sort.
+func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
for i := a + 1; i < b; i++ {
- for j := i; j > a && less(data[j], data[j-1]); j-- {
+ for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
-// siftDownLessFunc implements the heap property on data[lo:hi].
+// siftDownCmpFunc implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
-func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
+func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
- if child+1 < hi && less(data[first+child], data[first+child+1]) {
+ if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
child++
}
- if !less(data[first+root], data[first+child]) {
+ if !(cmp(data[first+root], data[first+child]) < 0) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
@@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool
}
}
-func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
first := a
lo := 0
hi := b - a
// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownLessFunc(data, i, hi, first, less)
+ siftDownCmpFunc(data, i, hi, first, cmp)
}
// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
- siftDownLessFunc(data, lo, i, first, less)
+ siftDownCmpFunc(data, lo, i, first, cmp)
}
}
-// pdqsortLessFunc sorts data[a:b].
+// pdqsortCmpFunc sorts data[a:b].
// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
+func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
const maxInsertion = 12
var (
@@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
length := b - a
if length <= maxInsertion {
- insertionSortLessFunc(data, a, b, less)
+ insertionSortCmpFunc(data, a, b, cmp)
return
}
// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
- heapSortLessFunc(data, a, b, less)
+ heapSortCmpFunc(data, a, b, cmp)
return
}
// If the last partitioning was imbalanced, we need to breaking patterns.
if !wasBalanced {
- breakPatternsLessFunc(data, a, b, less)
+ breakPatternsCmpFunc(data, a, b, cmp)
limit--
}
- pivot, hint := choosePivotLessFunc(data, a, b, less)
+ pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
if hint == decreasingHint {
- reverseRangeLessFunc(data, a, b, less)
+ reverseRangeCmpFunc(data, a, b, cmp)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
@@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortLessFunc(data, a, b, less) {
+ if partialInsertionSortCmpFunc(data, a, b, cmp) {
return
}
}
// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
- if a > 0 && !less(data[a-1], data[pivot]) {
- mid := partitionEqualLessFunc(data, a, b, pivot, less)
+ if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
+ mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
a = mid
continue
}
- mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
+ mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
wasPartitioned = alreadyPartitioned
leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
- pdqsortLessFunc(data, a, mid, limit, less)
+ pdqsortCmpFunc(data, a, mid, limit, cmp)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
- pdqsortLessFunc(data, mid+1, b, limit, less)
+ pdqsortCmpFunc(data, mid+1, b, limit, cmp)
b = mid
}
}
}
-// partitionLessFunc does one quicksort partition.
+// partitionCmpFunc does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
-func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
- for i <= j && less(data[i], data[a]) {
+ for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
- for i <= j && !less(data[j], data[a]) {
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
@@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool)
j--
for {
- for i <= j && less(data[i], data[a]) {
+ for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
- for i <= j && !less(data[j], data[a]) {
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
@@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool)
return j, false
}
-// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for {
- for i <= j && !less(data[a], data[i]) {
+ for i <= j && !(cmp(data[a], data[i]) < 0) {
i++
}
- for i <= j && less(data[a], data[j]) {
+ for i <= j && (cmp(data[a], data[j]) < 0) {
j--
}
if i > j {
@@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E)
return i
}
-// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
+// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
- for i < b && !less(data[i], data[i-1]) {
+ for i < b && !(cmp(data[i], data[i-1]) < 0) {
i++
}
@@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
- if !less(data[j], data[j-1]) {
+ if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
- if !less(data[j], data[j-1]) {
+ if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
return false
}
-// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
-func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
length := b - a
if length >= 8 {
random := xorshift(length)
@@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
}
}
-// choosePivotLessFunc chooses a pivot in data[a:b].
+// choosePivotCmpFunc chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
@@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv
if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentLessFunc(data, i, &swaps, less)
- j = medianAdjacentLessFunc(data, j, &swaps, less)
- k = medianAdjacentLessFunc(data, k, &swaps, less)
+ i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
+ j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
+ k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
}
// Find the median among i, j, k and stores it into j.
- j = medianLessFunc(data, i, j, k, &swaps, less)
+ j = medianCmpFunc(data, i, j, k, &swaps, cmp)
}
switch swaps {
@@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv
}
}
-// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
-func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
- if less(data[b], data[a]) {
+// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
+ if cmp(data[b], data[a]) < 0 {
*swaps++
return b, a
}
return a, b
}
-// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
- a, b = order2LessFunc(data, a, b, swaps, less)
- b, c = order2LessFunc(data, b, c, swaps, less)
- a, b = order2LessFunc(data, a, b, swaps, less)
+// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ b, c = order2CmpFunc(data, b, c, swaps, cmp)
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
return b
}
-// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
-func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
- return medianLessFunc(data, a-1, a, a+1, swaps, less)
+// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
+ return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
}
-func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
i := a
j := b - 1
for i < j {
@@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
}
}
-func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
+func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}
-func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
+func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
- insertionSortLessFunc(data, a, b, less)
+ insertionSortCmpFunc(data, a, b, cmp)
a = b
b += blockSize
}
- insertionSortLessFunc(data, a, n, less)
+ insertionSortCmpFunc(data, a, n, cmp)
for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
- symMergeLessFunc(data, a, a+blockSize, b, less)
+ symMergeCmpFunc(data, a, a+blockSize, b, cmp)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
- symMergeLessFunc(data, a, m, n, less)
+ symMergeCmpFunc(data, a, m, n, cmp)
}
blockSize *= 2
}
}
-// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
@@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
-func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
@@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
j := b
for i < j {
h := int(uint(i+j) >> 1)
- if less(data[h], data[a]) {
+ if cmp(data[h], data[a]) < 0 {
i = h + 1
} else {
j = h
@@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
j := m
for i < j {
h := int(uint(i+j) >> 1)
- if !less(data[m], data[h]) {
+ if !(cmp(data[m], data[h]) < 0) {
i = h + 1
} else {
j = h
@@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
for start < r {
c := int(uint(start+r) >> 1)
- if !less(data[p-c], data[c]) {
+ if !(cmp(data[p-c], data[c]) < 0) {
start = c + 1
} else {
r = c
@@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
end := n - start
if start < m && m < end {
- rotateLessFunc(data, start, m, end, less)
+ rotateCmpFunc(data, start, m, end, cmp)
}
if a < start && start < mid {
- symMergeLessFunc(data, a, start, mid, less)
+ symMergeCmpFunc(data, a, start, mid, cmp)
}
if mid < end && end < b {
- symMergeLessFunc(data, mid, end, b, less)
+ symMergeCmpFunc(data, mid, end, b, cmp)
}
}
-// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
i := m - a
j := b - m
for i != j {
if i > j {
- swapRangeLessFunc(data, m-i, m, j, less)
+ swapRangeCmpFunc(data, m-i, m, j, cmp)
i -= j
} else {
- swapRangeLessFunc(data, m-i, m+j-i, i, less)
+ swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
j -= i
}
}
// i == j
- swapRangeLessFunc(data, m-i, m, i, less)
+ swapRangeCmpFunc(data, m-i, m, i, cmp)
}
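
Note: the hunks above mechanically translate the `less(a, b)`-style helpers into `cmp(a, b)`-style ones, where a three-way comparator returning a negative, zero, or positive int replaces the boolean predicate; `less(a, b)` becomes `cmp(a, b) < 0` and `!less(a, b)` becomes `!(cmp(a, b) < 0)`. A minimal standalone sketch of the two conventions (the case-insensitive comparator is only an illustration, not taken from the patch):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Boolean predicate: reports whether a sorts before b.
	less := func(a, b string) bool { return strings.ToLower(a) < strings.ToLower(b) }
	// Three-way comparator: negative if a<b, zero if equal, positive if a>b.
	cmp := func(a, b string) int { return strings.Compare(strings.ToLower(a), strings.ToLower(b)) }

	fmt.Println(less("Go", "rust"))       // true
	fmt.Println(cmp("Go", "rust") < 0)    // true: the same ordering decision
	fmt.Println(!(cmp("Go", "rust") < 0)) // false: plays the role of !less(a, b)
}
```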
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
index efaa1c8b7..99b47c398 100644
--- a/vendor/golang.org/x/exp/slices/zsortordered.go
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints"
// insertionSortOrdered sorts data[a:b] using insertion sort.
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
for i := a + 1; i < b; i++ {
- for j := i; j > a && (data[j] < data[j-1]); j-- {
+ for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
@@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
if child >= hi {
break
}
- if child+1 < hi && (data[first+child] < data[first+child+1]) {
+ if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
child++
}
- if !(data[first+root] < data[first+child]) {
+ if !cmpLess(data[first+root], data[first+child]) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
@@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
- if a > 0 && !(data[a-1] < data[pivot]) {
+ if a > 0 && !cmpLess(data[a-1], data[pivot]) {
mid := partitionEqualOrdered(data, a, b, pivot)
a = mid
continue
@@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
- for i <= j && (data[i] < data[a]) {
+ for i <= j && cmpLess(data[i], data[a]) {
i++
}
- for i <= j && !(data[j] < data[a]) {
+ for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
@@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo
j--
for {
- for i <= j && (data[i] < data[a]) {
+ for i <= j && cmpLess(data[i], data[a]) {
i++
}
- for i <= j && !(data[j] < data[a]) {
+ for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
@@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for {
- for i <= j && !(data[a] < data[i]) {
+ for i <= j && !cmpLess(data[a], data[i]) {
i++
}
- for i <= j && (data[a] < data[j]) {
+ for i <= j && cmpLess(data[a], data[j]) {
j--
}
if i > j {
@@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
)
i := a + 1
for j := 0; j < maxSteps; j++ {
- for i < b && !(data[i] < data[i-1]) {
+ for i < b && !cmpLess(data[i], data[i-1]) {
i++
}
@@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
- if !(data[j] < data[j-1]) {
+ if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
- if !(data[j] < data[j-1]) {
+ if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h
// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
- if data[b] < data[a] {
+ if cmpLess(data[b], data[a]) {
*swaps++
return b, a
}
@@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
j := b
for i < j {
h := int(uint(i+j) >> 1)
- if data[h] < data[a] {
+ if cmpLess(data[h], data[a]) {
i = h + 1
} else {
j = h
@@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
j := m
for i < j {
h := int(uint(i+j) >> 1)
- if !(data[m] < data[h]) {
+ if !cmpLess(data[m], data[h]) {
i = h + 1
} else {
j = h
@@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
for start < r {
c := int(uint(start+r) >> 1)
- if !(data[p-c] < data[c]) {
+ if !cmpLess(data[p-c], data[c]) {
start = c + 1
} else {
r = c
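
Note: in the ordered variant the raw `<` comparisons are funneled through a `cmpLess` helper instead of a user-supplied comparator. Its definition is not part of this hunk; as a rough sketch under that caveat, it can be thought of as a single comparison point for ordered element types (the vendored helper may additionally special-case NaN for floats — treat the body below as an assumption):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

// cmpLessSketch is a hypothetical stand-in for the cmpLess helper referenced
// above: one place through which every "<" comparison on ordered element
// types flows. The real helper may also handle NaN; this sketch only wraps <.
func cmpLessSketch[E constraints.Ordered](a, b E) bool {
	return a < b
}

func main() {
	fmt.Println(cmpLessSketch(1, 2))     // true
	fmt.Println(cmpLessSketch("b", "a")) // false
}
```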
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
index 2cb9c408f..0c1b86793 100644
--- a/vendor/golang.org/x/net/context/go17.go
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.7
-// +build go1.7
package context
diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go
index 64d31ecc3..e31e35a90 100644
--- a/vendor/golang.org/x/net/context/go19.go
+++ b/vendor/golang.org/x/net/context/go19.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.9
-// +build go1.9
package context
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
index 7b6b68511..065ff3dfa 100644
--- a/vendor/golang.org/x/net/context/pre_go17.go
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.7
-// +build !go1.7
package context
diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go
index 1f9715341..ec5a63803 100644
--- a/vendor/golang.org/x/net/context/pre_go19.go
+++ b/vendor/golang.org/x/net/context/pre_go19.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.9
-// +build !go1.9
package context
diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go
index a3067f8de..e6f55cbd1 100644
--- a/vendor/golang.org/x/net/http2/databuffer.go
+++ b/vendor/golang.org/x/net/http2/databuffer.go
@@ -20,41 +20,44 @@ import (
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
-var (
- dataChunkSizeClasses = []int{
- 1 << 10,
- 2 << 10,
- 4 << 10,
- 8 << 10,
- 16 << 10,
- }
- dataChunkPools = [...]sync.Pool{
- {New: func() interface{} { return make([]byte, 1<<10) }},
- {New: func() interface{} { return make([]byte, 2<<10) }},
- {New: func() interface{} { return make([]byte, 4<<10) }},
- {New: func() interface{} { return make([]byte, 8<<10) }},
- {New: func() interface{} { return make([]byte, 16<<10) }},
- }
-)
+var dataChunkPools = [...]sync.Pool{
+ {New: func() interface{} { return new([1 << 10]byte) }},
+ {New: func() interface{} { return new([2 << 10]byte) }},
+ {New: func() interface{} { return new([4 << 10]byte) }},
+ {New: func() interface{} { return new([8 << 10]byte) }},
+ {New: func() interface{} { return new([16 << 10]byte) }},
+}
func getDataBufferChunk(size int64) []byte {
- i := 0
- for ; i < len(dataChunkSizeClasses)-1; i++ {
- if size <= int64(dataChunkSizeClasses[i]) {
- break
- }
+ switch {
+ case size <= 1<<10:
+ return dataChunkPools[0].Get().(*[1 << 10]byte)[:]
+ case size <= 2<<10:
+ return dataChunkPools[1].Get().(*[2 << 10]byte)[:]
+ case size <= 4<<10:
+ return dataChunkPools[2].Get().(*[4 << 10]byte)[:]
+ case size <= 8<<10:
+ return dataChunkPools[3].Get().(*[8 << 10]byte)[:]
+ default:
+ return dataChunkPools[4].Get().(*[16 << 10]byte)[:]
}
- return dataChunkPools[i].Get().([]byte)
}
func putDataBufferChunk(p []byte) {
- for i, n := range dataChunkSizeClasses {
- if len(p) == n {
- dataChunkPools[i].Put(p)
- return
- }
+ switch len(p) {
+ case 1 << 10:
+ dataChunkPools[0].Put((*[1 << 10]byte)(p))
+ case 2 << 10:
+ dataChunkPools[1].Put((*[2 << 10]byte)(p))
+ case 4 << 10:
+ dataChunkPools[2].Put((*[4 << 10]byte)(p))
+ case 8 << 10:
+ dataChunkPools[3].Put((*[8 << 10]byte)(p))
+ case 16 << 10:
+ dataChunkPools[4].Put((*[16 << 10]byte)(p))
+ default:
+ panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
- panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
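
Note: the rewrite above replaces the size-class lookup loop with a switch and, more importantly, pools `*[N]byte` array pointers instead of `[]byte` slices. Storing a slice in a `sync.Pool` boxes its header into an interface and allocates on every Put, while an array pointer does not. A reduced sketch of the pattern for a single 4 KiB size class (the slice-to-array-pointer conversion requires Go 1.17+):

```go
package main

import (
	"fmt"
	"sync"
)

// pool4k hands out *[4<<10]byte values; callers still work with plain []byte.
var pool4k = sync.Pool{
	New: func() interface{} { return new([4 << 10]byte) },
}

func getChunk() []byte { return pool4k.Get().(*[4 << 10]byte)[:] }

func putChunk(p []byte) {
	// Convert the full-length slice back to an array pointer before pooling.
	pool4k.Put((*[4 << 10]byte)(p))
}

func main() {
	b := getChunk()
	fmt.Println(len(b)) // 4096
	putChunk(b)
}
```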
diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go
deleted file mode 100644
index 5bf62b032..000000000
--- a/vendor/golang.org/x/net/http2/go111.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.11
-// +build go1.11
-
-package http2
-
-import (
- "net/http/httptrace"
- "net/textproto"
-)
-
-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
- return trace != nil && trace.WroteHeaderField != nil
-}
-
-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField(k, []string{v})
- }
-}
-
-func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
- if trace != nil {
- return trace.Got1xxResponse
- }
- return nil
-}
diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go
deleted file mode 100644
index 908af1ab9..000000000
--- a/vendor/golang.org/x/net/http2/go115.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.15
-// +build go1.15
-
-package http2
-
-import (
- "context"
- "crypto/tls"
-)
-
-// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
-// connection.
-func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
- dialer := &tls.Dialer{
- Config: cfg,
- }
- cn, err := dialer.DialContext(ctx, network, addr)
- if err != nil {
- return nil, err
- }
- tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
- return tlsCn, nil
-}
diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go
deleted file mode 100644
index aca4b2b31..000000000
--- a/vendor/golang.org/x/net/http2/go118.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package http2
-
-import (
- "crypto/tls"
- "net"
-)
-
-func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
- return tc.NetConn()
-}
diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go
deleted file mode 100644
index cc0baa819..000000000
--- a/vendor/golang.org/x/net/http2/not_go111.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.11
-// +build !go1.11
-
-package http2
-
-import (
- "net/http/httptrace"
- "net/textproto"
-)
-
-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
-
-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
-
-func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
- return nil
-}
diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go
deleted file mode 100644
index e6c04cf7a..000000000
--- a/vendor/golang.org/x/net/http2/not_go115.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.15
-// +build !go1.15
-
-package http2
-
-import (
- "context"
- "crypto/tls"
-)
-
-// dialTLSWithContext opens a TLS connection.
-func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
- cn, err := tls.Dial(network, addr, cfg)
- if err != nil {
- return nil, err
- }
- if err := cn.Handshake(); err != nil {
- return nil, err
- }
- if cfg.InsecureSkipVerify {
- return cn, nil
- }
- if err := cn.VerifyHostname(cfg.ServerName); err != nil {
- return nil, err
- }
- return cn, nil
-}
diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go
deleted file mode 100644
index eab532c96..000000000
--- a/vendor/golang.org/x/net/http2/not_go118.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.18
-// +build !go1.18
-
-package http2
-
-import (
- "crypto/tls"
- "net"
-)
-
-func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
- return nil
-}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 02c88b6b3..ae94c6408 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -2549,7 +2549,6 @@ type responseWriterState struct {
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished
- dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64
@@ -2669,7 +2668,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
date: date,
})
if err != nil {
- rws.dirty = true
return 0, err
}
if endStream {
@@ -2690,7 +2688,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
- rws.dirty = true
return 0, err
}
}
@@ -2702,9 +2699,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
trailers: rws.trailers,
endStream: true,
})
- if err != nil {
- rws.dirty = true
- }
return len(p), err
}
return len(p), nil
@@ -2920,14 +2914,12 @@ func (rws *responseWriterState) writeHeader(code int) {
h.Del("Transfer-Encoding")
}
- if rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
httpResCode: code,
h: h,
endStream: rws.handlerDone && !rws.hasTrailers(),
- }) != nil {
- rws.dirty = true
- }
+ })
return
}
@@ -2992,19 +2984,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
func (w *responseWriter) handlerDone() {
rws := w.rws
- dirty := rws.dirty
rws.handlerDone = true
w.Flush()
w.rws = nil
- if !dirty {
- // Only recycle the pool if all prior Write calls to
- // the serverConn goroutine completed successfully. If
- // they returned earlier due to resets from the peer
- // there might still be write goroutines outstanding
- // from the serverConn referencing the rws memory. See
- // issue 20704.
- responseWriterStatePool.Put(rws)
- }
+ responseWriterStatePool.Put(rws)
}
// Push errors.
@@ -3187,6 +3170,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
}
+ sc.curHandlers++
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
return promisedID, nil
}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 4515b22c4..df578b86c 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() {
if !ok {
return
}
- if nc := tlsUnderlyingConn(tc); nc != nil {
+ if nc := tc.NetConn(); nc != nil {
nc.Close()
}
}
@@ -3201,3 +3201,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
trace.GotFirstResponseByte()
}
}
+
+func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
+ return trace != nil && trace.WroteHeaderField != nil
+}
+
+func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField(k, []string{v})
+ }
+}
+
+func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
+ if trace != nil {
+ return trace.Got1xxResponse
+ }
+ return nil
+}
+
+// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
+// connection.
+func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
+ dialer := &tls.Dialer{
+ Config: cfg,
+ }
+ cn, err := dialer.DialContext(ctx, network, addr)
+ if err != nil {
+ return nil, err
+ }
+ tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
+ return tlsCn, nil
+}
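
Note: with the pre-Go-1.15 fallback files deleted, the context-aware TLS dial now lives directly in transport.go. A standalone sketch of the same pattern (the host name and timeout are illustrative only):

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"time"
)

// dialTLS mirrors the consolidated helper above: tls.Dialer honors the
// context, and DialContext documents that the returned Conn is a *tls.Conn.
func dialTLS(ctx context.Context, addr string, cfg *tls.Config) (*tls.Conn, error) {
	d := &tls.Dialer{Config: cfg}
	c, err := d.DialContext(ctx, "tcp", addr)
	if err != nil {
		return nil, err
	}
	return c.(*tls.Conn), nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	conn, err := dialTLS(ctx, "example.com:443", &tls.Config{ServerName: "example.com"})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Printf("TLS version: %#x\n", conn.ConnectionState().Version)
}
```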
diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go
index c5c4338db..712f1ad83 100644
--- a/vendor/golang.org/x/net/idna/go118.go
+++ b/vendor/golang.org/x/net/idna/go118.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.18
-// +build go1.18
package idna
diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go
index 64ccf85fe..7b3717884 100644
--- a/vendor/golang.org/x/net/idna/idna10.0.0.go
+++ b/vendor/golang.org/x/net/idna/idna10.0.0.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.10
-// +build go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go
index ee1698cef..cc6a892a4 100644
--- a/vendor/golang.org/x/net/idna/idna9.0.0.go
+++ b/vendor/golang.org/x/net/idna/idna9.0.0.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.10
-// +build !go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go
index 3aaccab1c..40e74bb3d 100644
--- a/vendor/golang.org/x/net/idna/pre_go118.go
+++ b/vendor/golang.org/x/net/idna/pre_go118.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.18
-// +build !go1.18
package idna
diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go
index d1d62ef45..c6c2bf10a 100644
--- a/vendor/golang.org/x/net/idna/tables10.0.0.go
+++ b/vendor/golang.org/x/net/idna/tables10.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.10 && !go1.13
-// +build go1.10,!go1.13
package idna
diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go
index 167efba71..76789393c 100644
--- a/vendor/golang.org/x/net/idna/tables11.0.0.go
+++ b/vendor/golang.org/x/net/idna/tables11.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.13 && !go1.14
-// +build go1.13,!go1.14
package idna
diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go
index ab40f7bcc..0600cd2ae 100644
--- a/vendor/golang.org/x/net/idna/tables12.0.0.go
+++ b/vendor/golang.org/x/net/idna/tables12.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.14 && !go1.16
-// +build go1.14,!go1.16
package idna
diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go
index 66701eadf..2fb768ef6 100644
--- a/vendor/golang.org/x/net/idna/tables13.0.0.go
+++ b/vendor/golang.org/x/net/idna/tables13.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.16 && !go1.21
-// +build go1.16,!go1.21
package idna
diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go
index 40033778f..5ff05fe1a 100644
--- a/vendor/golang.org/x/net/idna/tables15.0.0.go
+++ b/vendor/golang.org/x/net/idna/tables15.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.21
-// +build go1.21
package idna
diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go
index 4074b5332..0f25e84ca 100644
--- a/vendor/golang.org/x/net/idna/tables9.0.0.go
+++ b/vendor/golang.org/x/net/idna/tables9.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build !go1.10
-// +build !go1.10
package idna
diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go
index bb63f904b..8a75b9667 100644
--- a/vendor/golang.org/x/net/idna/trie12.0.0.go
+++ b/vendor/golang.org/x/net/idna/trie12.0.0.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.16
-// +build !go1.16
package idna
diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go
index 7d68a8dc1..fa45bb907 100644
--- a/vendor/golang.org/x/net/idna/trie13.0.0.go
+++ b/vendor/golang.org/x/net/idna/trie13.0.0.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.16
-// +build go1.16
package idna
diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go
index ca717634a..03c42c6f8 100644
--- a/vendor/golang.org/x/oauth2/google/doc.go
+++ b/vendor/golang.org/x/oauth2/google/doc.go
@@ -101,6 +101,8 @@
// executable-sourced credentials), please check out:
// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in
//
+// # Security considerations
+//
// Note that this library does not perform any validation on the token_url, token_info_url,
// or service_account_impersonation_url fields of the credential configuration.
// It is not recommended to use a credential configuration that you did not generate with
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index f0e0cf3cb..8f6c7f493 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit {
// or its associated context.Context is canceled.
//
// The methods AllowN, ReserveN, and WaitN consume n tokens.
+//
+// Limiter is safe for simultaneous use by multiple goroutines.
type Limiter struct {
mu sync.Mutex
limit Limit
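
Note: the added doc line states that a Limiter may be shared across goroutines. A minimal sketch of several workers waiting on one limiter (the rate and worker count are arbitrary):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One token every 100ms, burst of 1, shared by all workers.
	lim := rate.NewLimiter(rate.Every(100*time.Millisecond), 1)

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if err := lim.Wait(context.Background()); err == nil {
				fmt.Println("worker", id, "proceeding at", time.Now().Format("15:04:05.000"))
			}
		}(i)
	}
	wg.Wait()
}
```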
diff --git a/vendor/gomodules.xyz/blobfs/lib.go b/vendor/gomodules.xyz/blobfs/lib.go
index 099d3cf3f..cd9e0965b 100644
--- a/vendor/gomodules.xyz/blobfs/lib.go
+++ b/vendor/gomodules.xyz/blobfs/lib.go
@@ -14,10 +14,19 @@ import (
type BlobFS struct {
storageURL string
+ prefix string
}
-func New(storageURL string) Interface {
- return &BlobFS{storageURL: storageURL}
+func New(storageURL string, prefix ...string) Interface {
+ var bucketPrefix string
+ if len(prefix) > 0 {
+ bucketPrefix = prefix[0]
+ }
+
+ return &BlobFS{
+ storageURL: storageURL,
+ prefix: bucketPrefix,
+ }
}
func NewInMemoryFS() Interface {
@@ -30,7 +39,7 @@ func NewOsFs() Interface {
func (fs *BlobFS) WriteFile(ctx context.Context, filepath string, data []byte) error {
dir, filename := path.Split(filepath)
- bucket, err := fs.openBucket(ctx, dir)
+ bucket, err := fs.openBucket(ctx, path.Join(fs.prefix, dir))
if err != nil {
return err
}
@@ -54,7 +63,7 @@ func (fs *BlobFS) WriteFile(ctx context.Context, filepath string, data []byte) e
func (fs *BlobFS) ReadFile(ctx context.Context, filepath string) ([]byte, error) {
dir, filename := path.Split(filepath)
- bucket, err := fs.openBucket(ctx, dir)
+ bucket, err := fs.openBucket(ctx, path.Join(fs.prefix, dir))
if err != nil {
return nil, err
}
@@ -76,7 +85,7 @@ func (fs *BlobFS) ReadFile(ctx context.Context, filepath string) ([]byte, error)
func (fs *BlobFS) DeleteFile(ctx context.Context, filepath string) error {
dir, filename := path.Split(filepath)
- bucket, err := fs.openBucket(ctx, dir)
+ bucket, err := fs.openBucket(ctx, path.Join(fs.prefix, dir))
if err != nil {
return err
}
@@ -87,7 +96,7 @@ func (fs *BlobFS) DeleteFile(ctx context.Context, filepath string) error {
func (fs *BlobFS) Exists(ctx context.Context, filepath string) (bool, error) {
dir, filename := path.Split(filepath)
- bucket, err := fs.openBucket(ctx, dir)
+ bucket, err := fs.openBucket(ctx, path.Join(fs.prefix, dir))
if err != nil {
return false, err
}
@@ -98,7 +107,7 @@ func (fs *BlobFS) Exists(ctx context.Context, filepath string) (bool, error) {
func (fs *BlobFS) SignedURL(ctx context.Context, filepath string, opts *blob.SignedURLOptions) (string, error) {
dir, filename := path.Split(filepath)
- bucket, err := fs.openBucket(ctx, dir)
+ bucket, err := fs.openBucket(ctx, path.Join(fs.prefix, dir))
if err != nil {
return "", err
}
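
Note: the new optional `prefix` argument to `New` is joined in front of the directory portion of every path before the bucket is opened. A usage sketch under the assumption of a local `file://` bucket (the bucket URL and prefix below are illustrative only):

```go
package main

import (
	"context"
	"fmt"

	"gomodules.xyz/blobfs"
)

func main() {
	// Every path passed to fs is stored under the "tenants/acme" prefix.
	fs := blobfs.New("file:///tmp/demo-bucket?create_dir=true", "tenants/acme")

	ctx := context.Background()
	if err := fs.WriteFile(ctx, "config/app.yaml", []byte("replicas: 2\n")); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	data, err := fs.ReadFile(ctx, "config/app.yaml")
	fmt.Println(string(data), err)
}
```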
diff --git a/vendor/gomodules.xyz/cert/.gitignore b/vendor/gomodules.xyz/cert/.gitignore
new file mode 100644
index 000000000..485dee64b
--- /dev/null
+++ b/vendor/gomodules.xyz/cert/.gitignore
@@ -0,0 +1 @@
+.idea
diff --git a/vendor/gomodules.xyz/cert/cert.go b/vendor/gomodules.xyz/cert/cert.go
index 0e160dabd..7b041bd56 100644
--- a/vendor/gomodules.xyz/cert/cert.go
+++ b/vendor/gomodules.xyz/cert/cert.go
@@ -29,11 +29,11 @@ import (
"encoding/pem"
"errors"
"fmt"
- "io/ioutil"
"math"
"math/big"
"net"
"path"
+ "os"
"strings"
"time"
)
@@ -162,9 +162,9 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a
certFixturePath := path.Join(fixtureDirectory, baseName+".crt")
keyFixturePath := path.Join(fixtureDirectory, baseName+".key")
if len(fixtureDirectory) > 0 {
- cert, err := ioutil.ReadFile(certFixturePath)
+ cert, err := os.ReadFile(certFixturePath)
if err == nil {
- key, err := ioutil.ReadFile(keyFixturePath)
+ key, err := os.ReadFile(keyFixturePath)
if err == nil {
return cert, key, nil
}
@@ -249,10 +249,10 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a
}
if len(fixtureDirectory) > 0 {
- if err := ioutil.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil {
+ if err := os.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil {
return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err)
}
- if err := ioutil.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil {
+ if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil {
return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err)
}
}
diff --git a/vendor/gomodules.xyz/cert/io.go b/vendor/gomodules.xyz/cert/io.go
index a57bf09d5..72d0c4c53 100644
--- a/vendor/gomodules.xyz/cert/io.go
+++ b/vendor/gomodules.xyz/cert/io.go
@@ -23,7 +23,6 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
)
@@ -70,7 +69,7 @@ func WriteCert(certPath string, data []byte) error {
if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil {
return err
}
- return ioutil.WriteFile(certPath, data, os.FileMode(0644))
+ return os.WriteFile(certPath, data, os.FileMode(0644))
}
// WriteKey writes the pem-encoded key data to keyPath.
@@ -81,13 +80,13 @@ func WriteKey(keyPath string, data []byte) error {
if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
return err
}
- return ioutil.WriteFile(keyPath, data, os.FileMode(0600))
+ return os.WriteFile(keyPath, data, os.FileMode(0600))
}
// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
// can't find one, it will generate a new key and store it there.
func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
- loadedData, err := ioutil.ReadFile(keyPath)
+ loadedData, err := os.ReadFile(keyPath)
// Call verifyKeyData to ensure the file wasn't empty/corrupt.
if err == nil && verifyKeyData(loadedData) {
return loadedData, false, err
@@ -144,7 +143,7 @@ func NewPool(filename string) (*x509.CertPool, error) {
// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func CertsFromFile(file string) ([]*x509.Certificate, error) {
- pemBlock, err := ioutil.ReadFile(file)
+ pemBlock, err := os.ReadFile(file)
if err != nil {
return nil, err
}
@@ -158,7 +157,7 @@ func CertsFromFile(file string) ([]*x509.Certificate, error) {
// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
// Returns an error if the file could not be read or if the private key could not be parsed.
func PrivateKeyFromFile(file string) (interface{}, error) {
- data, err := ioutil.ReadFile(file)
+ data, err := os.ReadFile(file)
if err != nil {
return nil, err
}
@@ -172,7 +171,7 @@ func PrivateKeyFromFile(file string) (interface{}, error) {
// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file.
// Reads public keys from both public and private key files.
func PublicKeysFromFile(file string) ([]interface{}, error) {
- data, err := ioutil.ReadFile(file)
+ data, err := os.ReadFile(file)
if err != nil {
return nil, err
}
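
Note: these hunks are a mechanical migration off the deprecated io/ioutil helpers; since Go 1.16, os.ReadFile and os.WriteFile are drop-in replacements with identical signatures. A trivial sketch (file name and contents are illustrative):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.WriteFile / os.ReadFile replace ioutil.WriteFile / ioutil.ReadFile.
	if err := os.WriteFile("example.key", []byte("-- not a real key --\n"), 0o600); err != nil {
		fmt.Println(err)
		return
	}
	data, err := os.ReadFile("example.key")
	fmt.Println(len(data), err)
}
```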
diff --git a/vendor/gomodules.xyz/envsubst/.drone.yml b/vendor/gomodules.xyz/envsubst/.drone.yml
new file mode 100644
index 000000000..2b062137f
--- /dev/null
+++ b/vendor/gomodules.xyz/envsubst/.drone.yml
@@ -0,0 +1,8 @@
+kind: pipeline
+name: default
+
+steps:
+- name: build
+ image: golang:1.11
+ commands:
+ - go test -v ./...
diff --git a/vendor/gomodules.xyz/envsubst/.gitignore b/vendor/gomodules.xyz/envsubst/.gitignore
index 64a82bf67..3fc8da69e 100644
--- a/vendor/gomodules.xyz/envsubst/.gitignore
+++ b/vendor/gomodules.xyz/envsubst/.gitignore
@@ -1,4 +1,5 @@
/envsubst
coverage.out
+/vendor
.idea/
.vscode/
diff --git a/vendor/gomodules.xyz/envsubst/.travis.yml b/vendor/gomodules.xyz/envsubst/.travis.yml
deleted file mode 100644
index 9a206e170..000000000
--- a/vendor/gomodules.xyz/envsubst/.travis.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-language: go
-go:
- - 1.x
- - tip
-
-go_import_path: gomodules.xyz/envsubst
-
-cache:
- directories:
- - $HOME/.cache/go-build
- - $GOPATH/pkg/mod
-
-env:
- - GO111MODULE=on
-
-install: true
-
-script:
- - go test -v ./...
diff --git a/vendor/gomodules.xyz/envsubst/funcs.go b/vendor/gomodules.xyz/envsubst/funcs.go
index 07cad8116..82035c071 100644
--- a/vendor/gomodules.xyz/envsubst/funcs.go
+++ b/vendor/gomodules.xyz/envsubst/funcs.go
@@ -50,10 +50,11 @@ func toUpperFirst(s string, args ...string) string {
}
// toDefault returns a copy of the string s if not empty, else
-// returns a copy of the first string arugment.
+// returns a concatenation of the args without a separator.
func toDefault(s string, args ...string) string {
- if len(s) == 0 && len(args) == 1 {
- s = args[0]
+ if len(s) == 0 && len(args) > 0 {
+ // don't use any separator
+ s = strings.Join(args, "")
}
return s
}
@@ -72,6 +73,18 @@ func toSubstr(s string, args ...string) string {
return s
}
+ if pos < 0 {
+ // if pos is negative (counts from the end) add it
+ // to length to get first character offset
+ pos = len(s) + pos
+
+ // if negative offset exceeds the length of the string
+ // start from 0
+ if pos < 0 {
+ pos = 0
+ }
+ }
+
if len(args) == 1 {
if pos < len(s) {
return s[pos:]
@@ -89,9 +102,14 @@ func toSubstr(s string, args ...string) string {
}
if pos+length >= len(s) {
+ if pos < len(s) {
+ // if the position exceeds the length of the
+ // string just return the rest of it like bash
+ return s[pos:]
+ }
// if the position exceeds the length of the
- // string just return the rest of it like bash
- return s[pos:]
+ // string an empty string is returned
+ return ""
}
return s[pos : pos+length]
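
Note: the toSubstr changes above add bash-style handling for a negative offset (counting from the end of the string) and for a length that runs past the end. A standalone sketch of the resulting semantics:

```go
package main

import "fmt"

// substr mirrors the patched behavior: a negative pos counts from the end,
// an out-of-range pos yields "", and an oversized length returns the rest,
// matching bash's ${var:pos:len}.
func substr(s string, pos, length int) string {
	if pos < 0 {
		pos = len(s) + pos
		if pos < 0 {
			pos = 0
		}
	}
	if pos >= len(s) {
		return ""
	}
	if pos+length >= len(s) {
		return s[pos:]
	}
	return s[pos : pos+length]
}

func main() {
	fmt.Println(substr("abcdef", -3, 2)) // "de"
	fmt.Println(substr("abcdef", 4, 10)) // "ef"
	fmt.Println(substr("abcdef", 9, 2))  // ""
}
```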
diff --git a/vendor/gomodules.xyz/envsubst/parse/parse.go b/vendor/gomodules.xyz/envsubst/parse/parse.go
index 5dbecff93..bc418fbfb 100644
--- a/vendor/gomodules.xyz/envsubst/parse/parse.go
+++ b/vendor/gomodules.xyz/envsubst/parse/parse.go
@@ -1,9 +1,28 @@
package parse
-import "errors"
+import (
+ "errors"
+)
-// ErrBadSubstitution represents a substitution parsing error.
-var ErrBadSubstitution = errors.New("bad substitution")
+var (
+ // ErrBadSubstitution represents a substitution parsing error.
+ ErrBadSubstitution = errors.New("bad substitution")
+
+ // ErrMissingClosingBrace represents a missing closing brace "}" error.
+ ErrMissingClosingBrace = errors.New("missing closing brace")
+
+ // ErrParseVariableName represents the error when unable to parse a
+ // variable name within a substitution.
+ ErrParseVariableName = errors.New("unable to parse variable name")
+
+ // ErrParseFuncSubstitution represents the error when unable to parse the
+ // substitution within a function parameter.
+ ErrParseFuncSubstitution = errors.New("unable to parse substitution within function")
+
+ // ErrParseDefaultFunction represent the error when unable to parse a
+ // default function.
+ ErrParseDefaultFunction = errors.New("unable to parse default function")
+)
// Tree is the representation of a single parsed template.
type Tree struct {
@@ -31,6 +50,7 @@ func (t *Tree) Parse(buf string) (tree *Tree, err error) {
func (t *Tree) parseAny() (Node, error) {
t.scanner.accept = acceptRune
t.scanner.mode = scanIdent | scanLbrack | scanEscape
+ t.scanner.escapeChars = dollar
switch t.scanner.scan() {
case tokenIdent:
@@ -67,6 +87,8 @@ func (t *Tree) parseAny() (Node, error) {
}
func (t *Tree) parseFunc() (Node, error) {
+ // Turn on all escape characters
+ t.scanner.escapeChars = escapeAll
switch t.scanner.peek() {
case '#':
return t.parseLenFunc()
@@ -80,7 +102,7 @@ func (t *Tree) parseFunc() (Node, error) {
case tokenIdent:
name = t.scanner.string()
default:
- return nil, ErrBadSubstitution
+ return nil, ErrParseVariableName
}
switch t.scanner.peek() {
@@ -104,7 +126,7 @@ func (t *Tree) parseFunc() (Node, error) {
case tokenRbrack:
return newFuncNode(name), nil
default:
- return nil, ErrBadSubstitution
+ return nil, ErrMissingClosingBrace
}
}
@@ -119,8 +141,12 @@ func (t *Tree) parseParam(accept acceptFunc, mode byte) (Node, error) {
return newTextNode(
t.scanner.string(),
), nil
+ case tokenRbrack:
+ return newTextNode(
+ t.scanner.string(),
+ ), nil
default:
- return nil, ErrBadSubstitution
+ return nil, ErrParseFuncSubstitution
}
}
@@ -290,27 +316,23 @@ func (t *Tree) parseDefaultFunc(name string) (Node, error) {
case tokenIdent:
node.Name = t.scanner.string()
default:
- return nil, ErrBadSubstitution
+ return nil, ErrParseDefaultFunction
}
- // check for blank string
- switch t.scanner.peek() {
- case '}':
- return node, t.consumeRbrack()
- }
-
- // scan arg[1]
- {
+ // loop through all possible runes in default param
+ for {
+ // this acts as the break condition. Peek to see if we reached the end
+ switch t.scanner.peek() {
+ case '}':
+ return node, t.consumeRbrack()
+ }
param, err := t.parseParam(acceptNotClosing, scanIdent)
if err != nil {
return nil, err
}
- // param.Value = t.scanner.string()
node.Args = append(node.Args, param)
}
-
- return node, t.consumeRbrack()
}
// parses the ${param,} string function
diff --git a/vendor/gomodules.xyz/envsubst/parse/scan.go b/vendor/gomodules.xyz/envsubst/parse/scan.go
index 94812c160..2710879ac 100644
--- a/vendor/gomodules.xyz/envsubst/parse/scan.go
+++ b/vendor/gomodules.xyz/envsubst/parse/scan.go
@@ -34,17 +34,25 @@ const (
scanEscape
)
+// predefined mode bits to control escape tokens.
+const (
+ dollar byte = 1 << iota
+ backslash
+ escapeAll = dollar | backslash
+)
+
// returns true if rune is accepted.
type acceptFunc func(r rune, i int) bool
// scanner implements a lexical scanner that reads unicode
// characters and tokens from a string buffer.
type scanner struct {
- buf string
- pos int
- start int
- width int
- mode byte
+ buf string
+ pos int
+ start int
+ width int
+ mode byte
+ escapeChars byte
accept acceptFunc
}
@@ -98,6 +106,11 @@ func (s *scanner) string() string {
return s.buf[s.start:s.pos]
}
+// shouldEscape reports whether escaping is enabled for the given character class bit.
+func (s *scanner) shouldEscape(character byte) bool {
+ return s.escapeChars&character != 0
+}
+
// scan reads the next token or Unicode character from source and
// returns it. It returns EOF at the end of the source.
func (s *scanner) scan() token {
@@ -176,25 +189,26 @@ func (s *scanner) scanRbrack(r rune) bool {
}
// scanEscaped reads the next token or Unicode character from source
-// and returns true if it being escaped and should be sipped.
+// and returns true if it is being escaped and should be skipped.
func (s *scanner) scanEscaped(r rune) bool {
if s.mode&scanEscape == 0 {
return false
}
- if r == '$' {
+ if r == '$' && s.shouldEscape(dollar) {
if s.peek() == '$' {
return true
}
}
- if r != '\\' {
- return false
- }
- switch s.peek() {
- case '/', '\\':
- return true
- default:
- return false
+ if r == '\\' && s.shouldEscape(backslash) {
+ switch s.peek() {
+ case '/', '\\':
+ return true
+ default:
+ return false
+ }
}
+
+ return false
}
//
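
Note: the scanner now carries an `escapeChars` bitmask so the parser can decide per context which characters act as escapes — only `$` at the top level, everything inside function parameters. A reduced sketch of the bitmask test in isolation:

```go
package main

import "fmt"

// One bit per escapable character class; shouldEscape tests membership.
const (
	dollar byte = 1 << iota
	backslash
	escapeAll = dollar | backslash
)

func shouldEscape(mask, class byte) bool { return mask&class != 0 }

func main() {
	fmt.Println(shouldEscape(dollar, backslash))    // false: only "$$" escapes here
	fmt.Println(shouldEscape(escapeAll, backslash)) // true: backslash escapes honored
}
```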
diff --git a/vendor/gomodules.xyz/envsubst/readme.md b/vendor/gomodules.xyz/envsubst/readme.md
index c86a6f41f..f5284129a 100644
--- a/vendor/gomodules.xyz/envsubst/readme.md
+++ b/vendor/gomodules.xyz/envsubst/readme.md
@@ -9,24 +9,30 @@ Includes support for bash string replacement functions.
## Supported Functions
-* `${var^}`
-* `${var^^}`
-* `${var,}`
-* `${var,,}`
-* `${var:position}`
-* `${var:position:length}`
-* `${var#substring}`
-* `${var##substring}`
-* `${var%substring}`
-* `${var%%substring}`
-* `${var/substring/replacement}`
-* `${var//substring/replacement}`
-* `${var/#substring/replacement}`
-* `${var/%substring/replacement}`
-* `${#var}`
-* `${var=default}`
-* `${var:=default}`
-* `${var:-default}`
+| __Expression__ | __Meaning__ |
+| ----------------- | -------------- |
+| `${var}` | Value of `$var`
+| `${#var}` | String length of `$var`
+| `${var^}` | Uppercase first character of `$var`
+| `${var^^}` | Uppercase all characters in `$var`
+| `${var,}` | Lowercase first character of `$var`
+| `${var,,}` | Lowercase all characters in `$var`
+| `${var:n}` | Offset `$var` `n` characters from start
+| `${var:n:len}` | Offset `$var` `n` characters with max length of `len`
+| `${var#pattern}` | Strip shortest `pattern` match from start
+| `${var##pattern}` | Strip longest `pattern` match from start
+| `${var%pattern}` | Strip shortest `pattern` match from end
+| `${var%%pattern}` | Strip longest `pattern` match from end
+| `${var-default}` | If `$var` is not set, evaluate expression as `$default`
+| `${var:-default}` | If `$var` is not set or is empty, evaluate expression as `$default`
+| `${var=default}` | If `$var` is not set, evaluate expression as `$default`
+| `${var:=default}` | If `$var` is not set or is empty, evaluate expression as `$default`
+| `${var/pattern/replacement}` | Replace as few `pattern` matches as possible with `replacement`
+| `${var//pattern/replacement}` | Replace as many `pattern` matches as possible with `replacement`
+| `${var/#pattern/replacement}` | Replace `pattern` match with `replacement` from `$var` start
+| `${var/%pattern/replacement}` | Replace `pattern` match with `replacement` from `$var` end
+
+For a deeper reference, see [bash-hackers](https://wiki.bash-hackers.org/syntax/pe#case_modification) or [gnu pattern matching](https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html).
## Unsupported Functions
@@ -35,4 +41,4 @@ Includes support for bash string replacement functions.
* `${var:?default}`
* `${var:+default}`
- [doc]: http://godoc.org/gomodules.xyz/envsubst
\ No newline at end of file
+[doc]: http://godoc.org/gomodules.xyz/envsubst
diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go
index cecbb9ba1..829383f55 100644
--- a/vendor/google.golang.org/api/internal/cba.go
+++ b/vendor/google.golang.org/api/internal/cba.go
@@ -91,16 +91,10 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) {
s2aMTLSEndpoint: "",
}
- // Check the env to determine whether to use S2A.
- if !isGoogleS2AEnabled() {
+ if !shouldUseS2A(clientCertSource, settings) {
return &defaultTransportConfig, nil
}
- // If client cert is found, use that over S2A.
- // If MTLS is not enabled for the endpoint, skip S2A.
- if clientCertSource != nil || !mtlsEndpointEnabledForS2A() {
- return &defaultTransportConfig, nil
- }
s2aMTLSEndpoint := settings.DefaultMTLSEndpoint
// If there is endpoint override, honor it.
if settings.Endpoint != "" {
@@ -118,10 +112,6 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) {
}, nil
}
-func isGoogleS2AEnabled() bool {
- return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true"
-}
-
// getClientCertificateSource returns a default client certificate source, if
// not provided by the user.
//
@@ -275,8 +265,36 @@ func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, fun
return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil
}
+func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool {
+ // If client cert is found, use that over S2A.
+ if clientCertSource != nil {
+ return false
+ }
+ // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A.
+ if !isGoogleS2AEnabled() {
+ return false
+ }
+ // If DefaultMTLSEndpoint is not set and no endpoint override, skip S2A.
+ if settings.DefaultMTLSEndpoint == "" && settings.Endpoint == "" {
+ return false
+ }
+ // If MTLS is not enabled for this endpoint, skip S2A.
+ if !mtlsEndpointEnabledForS2A() {
+ return false
+ }
+ // If custom HTTP client is provided, skip S2A.
+ if settings.HTTPClient != nil {
+ return false
+ }
+ return true
+}
+
// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection.
var mtlsEndpointEnabledForS2A = func() bool {
// TODO(xmenxk): determine this via discovery config.
return true
}
+
+func isGoogleS2AEnabled() bool {
+ return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true"
+}
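
Note: shouldUseS2A now gathers all of the gating checks in one place; per the comment in the hunk, the environment check is case-insensitive and only the value "true" enables S2A. A sketch of that gate in isolation (the variable name comes from the comment above; treat it as an assumption about what googleAPIUseS2AEnv expands to):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// s2aEnabled mirrors isGoogleS2AEnabled above: case-insensitive match on "true".
func s2aEnabled() bool {
	return strings.ToLower(os.Getenv("EXPERIMENTAL_GOOGLE_API_USE_S2A")) == "true"
}

func main() {
	os.Setenv("EXPERIMENTAL_GOOGLE_API_USE_S2A", "True")
	fmt.Println(s2aEnabled()) // true
	os.Setenv("EXPERIMENTAL_GOOGLE_API_USE_S2A", "1")
	fmt.Println(s2aEnabled()) // false: only the literal "true" counts
}
```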
diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go
index 693a1b1ab..f39dd00d9 100644
--- a/vendor/google.golang.org/api/internal/gensupport/send.go
+++ b/vendor/google.golang.org/api/internal/gensupport/send.go
@@ -15,6 +15,7 @@ import (
"github.com/google/uuid"
"github.com/googleapis/gax-go/v2"
+ "github.com/googleapis/gax-go/v2/callctx"
)
// Use this error type to return an error which allows introspection of both
@@ -43,6 +44,16 @@ func (e wrappedCallErr) Is(target error) bool {
// req.WithContext, then calls any functions returned by the hooks in
// reverse order.
func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ // Add headers set in context metadata.
+ if ctx != nil {
+ headers := callctx.HeadersFromContext(ctx)
+ for k, vals := range headers {
+ for _, v := range vals {
+ req.Header.Add(k, v)
+ }
+ }
+ }
+
// Disallow Accept-Encoding because it interferes with the automatic gzip handling
// done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
if _, ok := req.Header["Accept-Encoding"]; ok {
@@ -77,6 +88,16 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re
// req.WithContext, then calls any functions returned by the hooks in
// reverse order.
func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) {
+ // Add headers set in context metadata.
+ if ctx != nil {
+ headers := callctx.HeadersFromContext(ctx)
+ for k, vals := range headers {
+ for _, v := range vals {
+ req.Header.Add(k, v)
+ }
+ }
+ }
+
// Disallow Accept-Encoding because it interferes with the automatic gzip handling
// done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
if _, ok := req.Header["Accept-Encoding"]; ok {
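
Note: both send paths now merge any headers that callers attached to the context via gax's callctx package onto the outgoing request before the Accept-Encoding check. The merging step in isolation, with a hand-built header map standing in for callctx.HeadersFromContext (the header name is illustrative):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("GET", "https://storage.googleapis.com/storage/v1/b", nil)

	// In gensupport this map comes from callctx.HeadersFromContext(ctx).
	headers := map[string][]string{"X-Goog-Request-Reason": {"audit"}}
	for k, vals := range headers {
		for _, v := range vals {
			req.Header.Add(k, v)
		}
	}
	fmt.Println(req.Header.Get("X-Goog-Request-Reason")) // audit
}
```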
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index 46ad187ec..2f04086a8 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "0.126.0"
+const Version = "0.140.0"
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
index edebc73ad..621207118 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -26,7 +26,7 @@
"description": "Stores and retrieves potentially large, immutable data objects.",
"discoveryVersion": "v1",
"documentationLink": "https://developers.google.com/storage/docs/json_api/",
- "etag": "\"34333739363230323936363635393736363430\"",
+ "etag": "\"39353535313838393033333032363632303533\"",
"icons": {
"x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
"x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
@@ -1311,7 +1311,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1357,7 +1357,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1399,7 +1399,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1444,7 +1444,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1493,7 +1493,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1545,7 +1545,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1588,7 +1588,7 @@
"type": "string"
},
"destinationObject": {
- "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the new object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1662,7 +1662,7 @@
],
"parameters": {
"destinationBucket": {
- "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1773,7 +1773,7 @@
"type": "string"
},
"sourceObject": {
- "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1843,7 +1843,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1907,7 +1907,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -1967,7 +1967,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -2054,7 +2054,7 @@
"type": "string"
},
"name": {
- "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "query",
"type": "string"
},
@@ -2252,7 +2252,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -2332,7 +2332,7 @@
"type": "string"
},
"destinationObject": {
- "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -2443,7 +2443,7 @@
"type": "string"
},
"sourceObject": {
- "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -2489,7 +2489,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -2536,7 +2536,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -2612,7 +2612,7 @@
"type": "string"
},
"object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
"location": "path",
"required": true,
"type": "string"
@@ -3010,7 +3010,7 @@
}
}
},
- "revision": "20230301",
+ "revision": "20230710",
"rootUrl": "https://storage.googleapis.com/",
"schemas": {
"Bucket": {
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index e11bf2e6d..a3f659149 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -10,6 +10,17 @@
//
// For product documentation, see: https://developers.google.com/storage/docs/json_api/
//
+// # Library status
+//
+// These client libraries are officially supported by Google. However, this
+// library is considered complete and is in maintenance mode. This means
+// that we will address critical bugs and security issues but will not add
+// any new features.
+//
+// When possible, we recommend using our newer
+// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go)
+// that are still actively being worked on and iterated on.
+//
// # Creating a client
//
// Usage example:
@@ -19,28 +30,31 @@
// ctx := context.Background()
// storageService, err := storage.NewService(ctx)
//
-// In this example, Google Application Default Credentials are used for authentication.
-//
-// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
+// In this example, Google Application Default Credentials are used for
+// authentication. For information on how to create and obtain Application
+// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// # Other authentication options
//
-// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes:
+// By default, all available scopes (see "Constants") are used to authenticate.
+// To restrict scopes, use [google.golang.org/api/option.WithScopes]:
//
// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope))
//
-// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
+// To use an API key for authentication (note: some APIs do not support API
+// keys), use [google.golang.org/api/option.WithAPIKey]:
//
// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza..."))
//
-// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
+// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth
+// flow), use [google.golang.org/api/option.WithTokenSource]:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
-// See https://godoc.org/google.golang.org/api/option/ for details on options.
+// See [google.golang.org/api/option.ClientOption] for details on options.
package storage // import "google.golang.org/api/storage/v1"
import (
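The revised package comment above lists the option-based ways to authenticate (scopes, API keys, token sources). A small sketch tying those pieces together, not part of the vendored change; "my-bucket" is a placeholder and error handling is kept minimal:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// Restrict the client to the read/write scope, as described in the
	// package comment above.
	svc, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope))
	if err != nil {
		log.Fatalf("storage.NewService: %v", err)
	}

	// List objects in a bucket; "my-bucket" is a placeholder name.
	objs, err := svc.Objects.List("my-bucket").Do()
	if err != nil {
		log.Fatalf("Objects.List: %v", err)
	}
	for _, o := range objs.Items {
		log.Printf("object: %s", o.Name)
	}
}
```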
@@ -7260,7 +7274,8 @@ type ObjectAccessControlsDeleteCall struct {
// user-emailAddress, group-groupId, group-emailAddress, allUsers, or
// allAuthenticatedUsers.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall {
c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -7375,7 +7390,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -7416,7 +7431,8 @@ type ObjectAccessControlsGetCall struct {
// user-emailAddress, group-groupId, group-emailAddress, allUsers, or
// allAuthenticatedUsers.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall {
c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -7569,7 +7585,7 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -7608,7 +7624,8 @@ type ObjectAccessControlsInsertCall struct {
//
// - bucket: Name of a bucket.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall {
c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -7745,7 +7762,7 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -7787,7 +7804,8 @@ type ObjectAccessControlsListCall struct {
//
// - bucket: Name of a bucket.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall {
c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -7931,7 +7949,7 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -7974,7 +7992,8 @@ type ObjectAccessControlsPatchCall struct {
// user-emailAddress, group-groupId, group-emailAddress, allUsers, or
// allAuthenticatedUsers.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall {
c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -8120,7 +8139,7 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -8166,7 +8185,8 @@ type ObjectAccessControlsUpdateCall struct {
// user-emailAddress, group-groupId, group-emailAddress, allUsers, or
// allAuthenticatedUsers.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall {
c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -8312,7 +8332,7 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -8357,7 +8377,8 @@ type ObjectsComposeCall struct {
// objects. The destination object is stored in this bucket.
// - destinationObject: Name of the new object. For information about
// how to URL encode object names to be path safe, see Encoding URI
-// Path Parts.
+// Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall {
c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.destinationBucket = destinationBucket
@@ -8540,7 +8561,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "destinationObject": {
- // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -8625,7 +8646,8 @@ type ObjectsCopyCall struct {
// - destinationBucket: Name of the bucket in which to store the new
// object. Overrides the provided object metadata's bucket value, if
// any.For information about how to URL encode object names to be path
-// safe, see Encoding URI Path Parts.
+// safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
// - destinationObject: Name of the new object. Required when the object
// metadata is not otherwise provided. Overrides the object metadata's
// name value, if any.
@@ -8633,7 +8655,8 @@ type ObjectsCopyCall struct {
// object.
// - sourceObject: Name of the source object. For information about how
// to URL encode object names to be path safe, see Encoding URI Path
-// Parts.
+// Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall {
c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sourceBucket = sourceBucket
@@ -8894,7 +8917,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// ],
// "parameters": {
// "destinationBucket": {
- // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -9005,7 +9028,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "sourceObject": {
- // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -9049,7 +9072,8 @@ type ObjectsDeleteCall struct {
//
// - bucket: Name of the bucket in which the object resides.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall {
c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -9215,7 +9239,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error {
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -9252,7 +9276,8 @@ type ObjectsGetCall struct {
//
// - bucket: Name of the bucket in which the object resides.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall {
c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -9484,7 +9509,7 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -9541,7 +9566,8 @@ type ObjectsGetIamPolicyCall struct {
//
// - bucket: Name of the bucket in which the object resides.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall {
c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -9685,7 +9711,7 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -9797,7 +9823,8 @@ func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall {
// Name sets the optional parameter "name": Name of the object. Required
// when the object metadata is not otherwise provided. Overrides the
// object metadata's name value, if any. For information about how to
-// URL encode object names to be path safe, see Encoding URI Path Parts.
+// URL encode object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall {
c.urlParams_.Set("name", name)
return c
@@ -10107,7 +10134,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "name": {
- // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "query",
// "type": "string"
// },
@@ -10517,7 +10544,8 @@ type ObjectsPatchCall struct {
//
// - bucket: Name of the bucket in which the object resides.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall {
c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -10756,7 +10784,7 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -10839,12 +10867,14 @@ type ObjectsRewriteCall struct {
// - destinationObject: Name of the new object. Required when the object
// metadata is not otherwise provided. Overrides the object metadata's
// name value, if any. For information about how to URL encode object
-// names to be path safe, see Encoding URI Path Parts.
+// names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
// - sourceBucket: Name of the bucket in which to find the source
// object.
// - sourceObject: Name of the source object. For information about how
// to URL encode object names to be path safe, see Encoding URI Path
-// Parts.
+// Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall {
c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sourceBucket = sourceBucket
@@ -11140,7 +11170,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse,
// "type": "string"
// },
// "destinationObject": {
- // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -11251,7 +11281,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse,
// "type": "string"
// },
// "sourceObject": {
- // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -11294,7 +11324,8 @@ type ObjectsSetIamPolicyCall struct {
//
// - bucket: Name of the bucket in which the object resides.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall {
c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -11431,7 +11462,7 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -11475,7 +11506,8 @@ type ObjectsTestIamPermissionsCall struct {
//
// - bucket: Name of the bucket in which the object resides.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
// - permissions: Permissions to test.
func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall {
c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -11622,7 +11654,7 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
@@ -11671,7 +11703,8 @@ type ObjectsUpdateCall struct {
//
// - bucket: Name of the bucket in which the object resides.
// - object: Name of the object. For information about how to URL encode
-// object names to be path safe, see Encoding URI Path Parts.
+// object names to be path safe, see Encoding URI Path Parts
+// (https://cloud.google.com/storage/docs/request-endpoints#encoding).
func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall {
c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -11910,7 +11943,7 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
// "location": "path",
// "required": true,
// "type": "string"
diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml
deleted file mode 100644
index 6d03f4d36..000000000
--- a/vendor/google.golang.org/appengine/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-
-go_import_path: google.golang.org/appengine
-
-install:
- - ./travis_install.sh
-
-script:
- - ./travis_test.sh
-
-matrix:
- include:
- - go: 1.9.x
- env: GOAPP=true
- - go: 1.10.x
- env: GOAPP=false
- - go: 1.11.x
- env: GO111MODULE=on
diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md
index ffc298520..289693613 100644
--- a/vendor/google.golang.org/appengine/CONTRIBUTING.md
+++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md
@@ -19,14 +19,12 @@
## Running system tests
-Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`.
-
Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`.
-Run tests with `goapp test`:
+Run tests with `go test`:
```
-goapp test -v google.golang.org/appengine/...
+go test -v google.golang.org/appengine/...
```
## Contributor License Agreements
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
index 9fdbacd3c..5ccddd999 100644
--- a/vendor/google.golang.org/appengine/README.md
+++ b/vendor/google.golang.org/appengine/README.md
@@ -1,6 +1,6 @@
# Go App Engine packages
-[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
+[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml)
This repository supports the Go runtime on *App Engine standard*.
It provides APIs for interacting with App Engine services.
@@ -51,7 +51,7 @@ code importing `appengine/datastore` will now need to import `google.golang.org/
Most App Engine services are available with exactly the same API.
A few APIs were cleaned up, and there are some differences:
-* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
+* `appengine.Context` has been replaced with the `Context` type from `context`.
* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
* `appengine.Datacenter` now takes a `context.Context` argument.
@@ -72,7 +72,7 @@ A few APIs were cleaned up, and there are some differences:
* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
Use the standard `net` package instead.
-## Key Encode/Decode compatibiltiy to help with datastore library migrations
+## Key Encode/Decode compatibility to help with datastore library migrations
Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore.
The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type.
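The README changes above reflect the move to the standard context package: request contexts are plain `context.Context` values, logging lives in `google.golang.org/appengine/log`, and `appengine.Timeout` is replaced by `context.WithTimeout`. A minimal sketch of a handler in the migrated style (the path and timeout are arbitrary, not taken from the vendored change):

```go
package main

import (
	"context"
	"net/http"
	"time"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

func handle(w http.ResponseWriter, r *http.Request) {
	// The request context now satisfies the standard context.Context.
	ctx := appengine.NewContext(r)

	// appengine.Timeout is gone; bound work with context.WithTimeout instead.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	// Logging moved from methods on appengine.Context to the log package.
	log.Infof(ctx, "handling %s", r.URL.Path)
	w.Write([]byte("ok"))
}

func main() {
	http.HandleFunc("/", handle)
	appengine.Main()
}
```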
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
index 8c9697674..35ba9c896 100644
--- a/vendor/google.golang.org/appengine/appengine.go
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -9,10 +9,10 @@
package appengine // import "google.golang.org/appengine"
import (
+ "context"
"net/http"
"github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
"google.golang.org/appengine/internal"
)
@@ -35,18 +35,18 @@ import (
//
// Main is designed so that the app's main package looks like this:
//
-// package main
+// package main
//
-// import (
-// "google.golang.org/appengine"
+// import (
+// "google.golang.org/appengine"
//
-// _ "myapp/package0"
-// _ "myapp/package1"
-// )
+// _ "myapp/package0"
+// _ "myapp/package1"
+// )
//
-// func main() {
-// appengine.Main()
-// }
+// func main() {
+// appengine.Main()
+// }
//
// The "myapp/packageX" packages are expected to register HTTP handlers
// in their init functions.
@@ -54,6 +54,9 @@ func Main() {
internal.Main()
}
+// Middleware wraps an http handler so that it can make GAE API calls
+var Middleware func(http.Handler) http.Handler = internal.Middleware
+
// IsDevAppServer reports whether the App Engine app is running in the
// development App Server.
func IsDevAppServer() bool {
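`appengine.Middleware` is newly exported above as the hook that makes GAE API calls possible from an arbitrary `http.Handler`. The sketch below wires it into a self-managed server; the assumption that this is the intended path for apps running their own listener (rather than calling `appengine.Main`) is mine, and the port is a placeholder:

```go
package main

import (
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

func hello(w http.ResponseWriter, r *http.Request) {
	// Middleware installs the App Engine API context on the request,
	// so packages like appengine/log can be used from r.Context().
	log.Infof(r.Context(), "serving %s", r.URL.Path)
	w.Write([]byte("hello"))
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", hello)
	// Wrap the mux so handlers can reach the App Engine APIs.
	http.ListenAndServe(":8080", appengine.Middleware(mux))
}
```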
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
index f4b645aad..6e1d041cd 100644
--- a/vendor/google.golang.org/appengine/appengine_vm.go
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -2,19 +2,19 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
+//go:build !appengine
// +build !appengine
package appengine
import (
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
+ "context"
)
// BackgroundContext returns a context not associated with a request.
-// This should only be used when not servicing a request.
-// This only works in App Engine "flexible environment".
+//
+// Deprecated: App Engine no longer has a special background context.
+// Just use context.Background().
func BackgroundContext() context.Context {
- return internal.BackgroundContext()
+ return context.Background()
}
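With `BackgroundContext` reduced to a thin deprecated wrapper, background work no longer needs the App Engine import at all. A small sketch of the replacement; `warmCaches` is a hypothetical task used only for illustration:

```go
package main

import (
	"context"
	"log"
	"time"
)

func main() {
	// Previously: ctx := appengine.BackgroundContext()
	// The function now just returns context.Background(), so call the
	// standard library directly and add whatever bound you need.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	if err := warmCaches(ctx); err != nil {
		log.Printf("warm-up failed: %v", err)
	}
}

// warmCaches is a hypothetical startup task, shown only for illustration.
func warmCaches(ctx context.Context) error {
	select {
	case <-time.After(100 * time.Millisecond): // stand-in for real work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
```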
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
index b8dcf8f36..1202fc1a5 100644
--- a/vendor/google.golang.org/appengine/identity.go
+++ b/vendor/google.golang.org/appengine/identity.go
@@ -5,10 +5,9 @@
package appengine
import (
+ "context"
"time"
- "golang.org/x/net/context"
-
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/app_identity"
modpb "google.golang.org/appengine/internal/modules"
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
index 721053c20..0569f5dd4 100644
--- a/vendor/google.golang.org/appengine/internal/api.go
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -2,12 +2,14 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
+//go:build !appengine
// +build !appengine
package internal
import (
"bytes"
+ "context"
"errors"
"fmt"
"io/ioutil"
@@ -24,7 +26,6 @@ import (
"time"
"github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
basepb "google.golang.org/appengine/internal/base"
logpb "google.golang.org/appengine/internal/log"
@@ -32,8 +33,7 @@ import (
)
const (
- apiPath = "/rpc_http"
- defaultTicketSuffix = "/default.20150612t184001.0"
+ apiPath = "/rpc_http"
)
var (
@@ -65,21 +65,22 @@ var (
IdleConnTimeout: 90 * time.Second,
},
}
-
- defaultTicketOnce sync.Once
- defaultTicket string
- backgroundContextOnce sync.Once
- backgroundContext netcontext.Context
)
-func apiURL() *url.URL {
+func apiURL(ctx context.Context) *url.URL {
host, port := "appengine.googleapis.internal", "10001"
if h := os.Getenv("API_HOST"); h != "" {
host = h
}
+ if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil {
+ host = hostOverride.(string)
+ }
if p := os.Getenv("API_PORT"); p != "" {
port = p
}
+ if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil {
+ port = portOverride.(string)
+ }
return &url.URL{
Scheme: "http",
Host: host + ":" + port,
@@ -87,82 +88,97 @@ func apiURL() *url.URL {
}
}
-func handleHTTP(w http.ResponseWriter, r *http.Request) {
- c := &context{
- req: r,
- outHeader: w.Header(),
- apiURL: apiURL(),
- }
- r = r.WithContext(withContext(r.Context(), c))
- c.req = r
-
- stopFlushing := make(chan int)
+// Middleware wraps an http handler so that it can make GAE API calls
+func Middleware(next http.Handler) http.Handler {
+ return handleHTTPMiddleware(executeRequestSafelyMiddleware(next))
+}
- // Patch up RemoteAddr so it looks reasonable.
- if addr := r.Header.Get(userIPHeader); addr != "" {
- r.RemoteAddr = addr
- } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
- r.RemoteAddr = addr
- } else {
- // Should not normally reach here, but pick a sensible default anyway.
- r.RemoteAddr = "127.0.0.1"
- }
- // The address in the headers will most likely be of these forms:
- // 123.123.123.123
- // 2001:db8::1
- // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
- if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
- // Assume the remote address is only a host; add a default port.
- r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
- }
+func handleHTTPMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ c := &aeContext{
+ req: r,
+ outHeader: w.Header(),
+ }
+ r = r.WithContext(withContext(r.Context(), c))
+ c.req = r
+
+ stopFlushing := make(chan int)
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
- // Start goroutine responsible for flushing app logs.
- // This is done after adding c to ctx.m (and stopped before removing it)
- // because flushing logs requires making an API call.
- go c.logFlusher(stopFlushing)
+ if logToLogservice() {
+ // Start goroutine responsible for flushing app logs.
+ // This is done after adding c to ctx.m (and stopped before removing it)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+ }
- executeRequestSafely(c, r)
- c.outHeader = nil // make sure header changes aren't respected any more
+ next.ServeHTTP(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
- stopFlushing <- 1 // any logging beyond this point will be dropped
+ flushed := make(chan struct{})
+ if logToLogservice() {
+ stopFlushing <- 1 // any logging beyond this point will be dropped
- // Flush any pending logs asynchronously.
- c.pendingLogs.Lock()
- flushes := c.pendingLogs.flushes
- if len(c.pendingLogs.lines) > 0 {
- flushes++
- }
- c.pendingLogs.Unlock()
- flushed := make(chan struct{})
- go func() {
- defer close(flushed)
- // Force a log flush, because with very short requests we
- // may not ever flush logs.
- c.flushLog(true)
- }()
- w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ go func() {
+ defer close(flushed)
+ // Force a log flush, because with very short requests we
+ // may not ever flush logs.
+ c.flushLog(true)
+ }()
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+ }
- // Avoid nil Write call if c.Write is never called.
- if c.outCode != 0 {
- w.WriteHeader(c.outCode)
- }
- if c.outBody != nil {
- w.Write(c.outBody)
- }
- // Wait for the last flush to complete before returning,
- // otherwise the security ticket will not be valid.
- <-flushed
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+ if logToLogservice() {
+ // Wait for the last flush to complete before returning,
+ // otherwise the security ticket will not be valid.
+ <-flushed
+ }
+ })
}
-func executeRequestSafely(c *context, r *http.Request) {
- defer func() {
- if x := recover(); x != nil {
- logf(c, 4, "%s", renderPanic(x)) // 4 == critical
- c.outCode = 500
- }
- }()
+func executeRequestSafelyMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ c := w.(*aeContext)
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
- http.DefaultServeMux.ServeHTTP(c, r)
+ next.ServeHTTP(w, r)
+ })
}
func renderPanic(x interface{}) string {
@@ -204,9 +220,9 @@ func renderPanic(x interface{}) string {
return string(buf)
}
-// context represents the context of an in-flight HTTP request.
+// aeContext represents the context of an in-flight HTTP request.
// It implements the appengine.Context and http.ResponseWriter interfaces.
-type context struct {
+type aeContext struct {
req *http.Request
outCode int
@@ -218,8 +234,6 @@ type context struct {
lines []*logpb.UserAppLogLine
flushes int
}
-
- apiURL *url.URL
}
var contextKey = "holds a *context"
@@ -227,8 +241,8 @@ var contextKey = "holds a *context"
// jointContext joins two contexts in a superficial way.
// It takes values and timeouts from a base context, and only values from another context.
type jointContext struct {
- base netcontext.Context
- valuesOnly netcontext.Context
+ base context.Context
+ valuesOnly context.Context
}
func (c jointContext) Deadline() (time.Time, bool) {
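The jointContext comment above describes a context that takes deadlines and cancellation from one parent while exposing values from both. The standalone sketch below mirrors that pattern; it is an illustrative re-implementation, not the internal type, and the base-first lookup order in Value is an assumption:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// joined mirrors the pattern described above: Deadline/Done/Err come from
// base, while Value consults base first and then valuesOnly.
type joined struct {
	base       context.Context
	valuesOnly context.Context
}

func (c joined) Deadline() (time.Time, bool) { return c.base.Deadline() }
func (c joined) Done() <-chan struct{}       { return c.base.Done() }
func (c joined) Err() error                  { return c.base.Err() }
func (c joined) Value(key interface{}) interface{} {
	if v := c.base.Value(key); v != nil {
		return v
	}
	return c.valuesOnly.Value(key)
}

type ctxValKey string

func main() {
	base, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	req := context.WithValue(context.Background(), ctxValKey("user"), "alice")

	ctx := joined{base: base, valuesOnly: req}
	_, hasDeadline := ctx.Deadline()
	fmt.Println(hasDeadline, ctx.Value(ctxValKey("user"))) // true alice
}
```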
@@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} {
// fromContext returns the App Engine context or nil if ctx is not
// derived from an App Engine context.
-func fromContext(ctx netcontext.Context) *context {
- c, _ := ctx.Value(&contextKey).(*context)
+func fromContext(ctx context.Context) *aeContext {
+ c, _ := ctx.Value(&contextKey).(*aeContext)
return c
}
-func withContext(parent netcontext.Context, c *context) netcontext.Context {
- ctx := netcontext.WithValue(parent, &contextKey, c)
+func withContext(parent context.Context, c *aeContext) context.Context {
+ ctx := context.WithValue(parent, &contextKey, c)
if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
ctx = withNamespace(ctx, ns)
}
return ctx
}
-func toContext(c *context) netcontext.Context {
- return withContext(netcontext.Background(), c)
+func toContext(c *aeContext) context.Context {
+ return withContext(context.Background(), c)
}
-func IncomingHeaders(ctx netcontext.Context) http.Header {
+func IncomingHeaders(ctx context.Context) http.Header {
if c := fromContext(ctx); c != nil {
return c.req.Header
}
return nil
}
-func ReqContext(req *http.Request) netcontext.Context {
+func ReqContext(req *http.Request) context.Context {
return req.Context()
}
-func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+func WithContext(parent context.Context, req *http.Request) context.Context {
return jointContext{
base: parent,
valuesOnly: req.Context(),
}
}
-// DefaultTicket returns a ticket used for background context or dev_appserver.
-func DefaultTicket() string {
- defaultTicketOnce.Do(func() {
- if IsDevAppServer() {
- defaultTicket = "testapp" + defaultTicketSuffix
- return
- }
- appID := partitionlessAppID()
- escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
- majVersion := VersionID(nil)
- if i := strings.Index(majVersion, "."); i > 0 {
- majVersion = majVersion[:i]
- }
- defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
- })
- return defaultTicket
-}
-
-func BackgroundContext() netcontext.Context {
- backgroundContextOnce.Do(func() {
- // Compute background security ticket.
- ticket := DefaultTicket()
-
- c := &context{
- req: &http.Request{
- Header: http.Header{
- ticketHeader: []string{ticket},
- },
- },
- apiURL: apiURL(),
- }
- backgroundContext = toContext(c)
-
- // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
- go c.logFlusher(make(chan int))
- })
-
- return backgroundContext
-}
-
// RegisterTestRequest registers the HTTP request req for testing, such that
-// any API calls are sent to the provided URL. It returns a closure to delete
-// the registration.
+// any API calls are sent to the provided URL.
// It should only be used by aetest package.
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
- c := &context{
- req: req,
- apiURL: apiURL,
- }
- ctx := withContext(decorate(req.Context()), c)
- req = req.WithContext(ctx)
- c.req = req
- return req, func() {}
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request {
+ ctx := req.Context()
+ ctx = withAPIHostOverride(ctx, apiURL.Hostname())
+ ctx = withAPIPortOverride(ctx, apiURL.Port())
+ ctx = WithAppIDOverride(ctx, appID)
+
+ // use the unregistered request as a placeholder so that withContext can read the headers
+ c := &aeContext{req: req}
+ c.req = req.WithContext(withContext(ctx, c))
+ return c.req
}
var errTimeout = &CallError{
@@ -348,7 +322,7 @@ var errTimeout = &CallError{
Timeout: true,
}
-func (c *context) Header() http.Header { return c.outHeader }
+func (c *aeContext) Header() http.Header { return c.outHeader }
// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
// codes do not permit a response body (nor response entity headers such as
@@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool {
return true
}
-func (c *context) Write(b []byte) (int, error) {
+func (c *aeContext) Write(b []byte) (int, error) {
if c.outCode == 0 {
c.WriteHeader(http.StatusOK)
}
@@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) {
return len(b), nil
}
-func (c *context) WriteHeader(code int) {
+func (c *aeContext) WriteHeader(code int) {
if c.outCode != 0 {
logf(c, 3, "WriteHeader called multiple times on request.") // error level
return
@@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) {
c.outCode = code
}
-func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) {
+ apiURL := apiURL(ctx)
hreq := &http.Request{
Method: "POST",
- URL: c.apiURL,
+ URL: apiURL,
Header: http.Header{
apiEndpointHeader: apiEndpointHeaderValue,
apiMethodHeader: apiMethodHeaderValue,
@@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error)
},
Body: ioutil.NopCloser(bytes.NewReader(body)),
ContentLength: int64(len(body)),
- Host: c.apiURL.Host,
- }
- if info := c.req.Header.Get(dapperHeader); info != "" {
- hreq.Header.Set(dapperHeader, info)
+ Host: apiURL.Host,
}
- if info := c.req.Header.Get(traceHeader); info != "" {
- hreq.Header.Set(traceHeader, info)
+ c := fromContext(ctx)
+ if c != nil {
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
+ }
}
tr := apiHTTPClient.Transport.(*http.Transport)
@@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error)
return hrespBody, nil
}
-func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+func Call(ctx context.Context, service, method string, in, out proto.Message) error {
if ns := NamespaceFromContext(ctx); ns != "" {
if fn, ok := NamespaceMods[service]; ok {
fn(in, ns)
@@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
}
c := fromContext(ctx)
- if c == nil {
- // Give a good error message rather than a panic lower down.
- return errNotAppEngineContext
- }
// Apply transaction modifications if we're in a transaction.
if t := transactionFromContext(ctx); t != nil {
if t.finished {
- return errors.New("transaction context has expired")
+ return errors.New("transaction aeContext has expired")
}
applyTransaction(in, &t.transaction)
}
@@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
return err
}
- ticket := c.req.Header.Get(ticketHeader)
- // Use a test ticket under test environment.
- if ticket == "" {
- if appid := ctx.Value(&appIDOverrideKey); appid != nil {
- ticket = appid.(string) + defaultTicketSuffix
+ ticket := ""
+ if c != nil {
+ ticket = c.req.Header.Get(ticketHeader)
+ if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
+ ticket = dri
}
}
- // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
- if ticket == "" {
- ticket = DefaultTicket()
- }
- if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
- ticket = dri
- }
req := &remotepb.Request{
ServiceName: &service,
Method: &method,
@@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
return err
}
- hrespBody, err := c.post(hreqBody, timeout)
+ hrespBody, err := post(ctx, hreqBody, timeout)
if err != nil {
return err
}
@@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
return proto.Unmarshal(res.Response, out)
}
-func (c *context) Request() *http.Request {
+func (c *aeContext) Request() *http.Request {
return c.req
}
-func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) {
// Truncate long log lines.
// TODO(dsymonds): Check if this is still necessary.
const lim = 8 << 10
@@ -575,18 +542,20 @@ var logLevelName = map[int64]string{
4: "CRITICAL",
}
-func logf(c *context, level int64, format string, args ...interface{}) {
+func logf(c *aeContext, level int64, format string, args ...interface{}) {
if c == nil {
- panic("not an App Engine context")
+ panic("not an App Engine aeContext")
}
s := fmt.Sprintf(format, args...)
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
- c.addLogLine(&logpb.UserAppLogLine{
- TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
- Level: &level,
- Message: &s,
- })
- // Only duplicate log to stderr if not running on App Engine second generation
+ if logToLogservice() {
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ }
+ // Log to stdout if not deployed
if !IsSecondGen() {
log.Print(logLevelName[level] + ": " + s)
}
@@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) {
// flushLog attempts to flush any pending logs to the appserver.
// It should not be called concurrently.
-func (c *context) flushLog(force bool) (flushed bool) {
+func (c *aeContext) flushLog(force bool) (flushed bool) {
c.pendingLogs.Lock()
// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
n, rem := 0, 30<<20
@@ -655,7 +624,7 @@ const (
forceFlushInterval = 60 * time.Second
)
-func (c *context) logFlusher(stop <-chan int) {
+func (c *aeContext) logFlusher(stop <-chan int) {
lastFlush := time.Now()
tick := time.NewTicker(flushInterval)
for {
@@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) {
}
}
-func ContextForTesting(req *http.Request) netcontext.Context {
- return toContext(&context{req: req})
+func ContextForTesting(req *http.Request) context.Context {
+ return toContext(&aeContext{req: req})
+}
+
+func logToLogservice() bool {
+ // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json
+ // where $LOG_DIR is /var/log in prod and some tmpdir in dev
+ return os.Getenv("LOG_TO_LOGSERVICE") != "0"
}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
index f0f40b2e3..87c33c798 100644
--- a/vendor/google.golang.org/appengine/internal/api_classic.go
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -2,11 +2,13 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
+//go:build appengine
// +build appengine
package internal
import (
+ "context"
"errors"
"fmt"
"net/http"
@@ -17,20 +19,19 @@ import (
basepb "appengine_internal/base"
"github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
)
var contextKey = "holds an appengine.Context"
// fromContext returns the App Engine context or nil if ctx is not
// derived from an App Engine context.
-func fromContext(ctx netcontext.Context) appengine.Context {
+func fromContext(ctx context.Context) appengine.Context {
c, _ := ctx.Value(&contextKey).(appengine.Context)
return c
}
// This is only for classic App Engine adapters.
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
+func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) {
c := fromContext(ctx)
if c == nil {
return nil, errNotAppEngineContext
@@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error
return c, nil
}
-func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
- ctx := netcontext.WithValue(parent, &contextKey, c)
+func withContext(parent context.Context, c appengine.Context) context.Context {
+ ctx := context.WithValue(parent, &contextKey, c)
s := &basepb.StringProto{}
c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
@@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont
return ctx
}
-func IncomingHeaders(ctx netcontext.Context) http.Header {
+func IncomingHeaders(ctx context.Context) http.Header {
if c := fromContext(ctx); c != nil {
if req, ok := c.Request().(*http.Request); ok {
return req.Header
@@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header {
return nil
}
-func ReqContext(req *http.Request) netcontext.Context {
- return WithContext(netcontext.Background(), req)
+func ReqContext(req *http.Request) context.Context {
+ return WithContext(context.Background(), req)
}
-func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+func WithContext(parent context.Context, req *http.Request) context.Context {
c := appengine.NewContext(req)
return withContext(parent, c)
}
@@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr
}
func (t *testingContext) Request() interface{} { return t.req }
-func ContextForTesting(req *http.Request) netcontext.Context {
- return withContext(netcontext.Background(), &testingContext{req: req})
+func ContextForTesting(req *http.Request) context.Context {
+ return withContext(context.Background(), &testingContext{req: req})
}
-func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+func Call(ctx context.Context, service, method string, in, out proto.Message) error {
if ns := NamespaceFromContext(ctx); ns != "" {
if fn, ok := NamespaceMods[service]; ok {
fn(in, ns)
@@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
return err
}
-func handleHTTP(w http.ResponseWriter, r *http.Request) {
- panic("handleHTTP called; this should be impossible")
+func Middleware(next http.Handler) http.Handler {
+ panic("Middleware called; this should be impossible")
}
func logf(c appengine.Context, level int64, format string, args ...interface{}) {
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
index e0c0b214b..5b95c13d9 100644
--- a/vendor/google.golang.org/appengine/internal/api_common.go
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -5,20 +5,26 @@
package internal
import (
+ "context"
"errors"
"os"
"github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
)
+type ctxKey string
+
+func (c ctxKey) String() string {
+ return "appengine context key: " + string(c)
+}
+
var errNotAppEngineContext = errors.New("not an App Engine context")
-type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error
var callOverrideKey = "holds []CallOverrideFunc"
-func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
+func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context {
// We avoid appending to any existing call override
// so we don't risk overwriting a popped stack below.
var cofs []CallOverrideFunc
@@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con
cofs = append(cofs, uf...)
}
cofs = append(cofs, f)
- return netcontext.WithValue(ctx, &callOverrideKey, cofs)
+ return context.WithValue(ctx, &callOverrideKey, cofs)
}
-func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
+func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) {
cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
if len(cofs) == 0 {
return nil, nil, false
@@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte
// We found a list of overrides; grab the last, and reconstitute a
// context that will hide it.
f := cofs[len(cofs)-1]
- ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+ ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
return f, ctx, true
}
@@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{})
var logOverrideKey = "holds a logOverrideFunc"
-func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
- return netcontext.WithValue(ctx, &logOverrideKey, f)
+func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context {
+ return context.WithValue(ctx, &logOverrideKey, f)
}
var appIDOverrideKey = "holds a string, being the full app ID"
-func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
- return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+func WithAppIDOverride(ctx context.Context, appID string) context.Context {
+ return context.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST")
+
+func withAPIHostOverride(ctx context.Context, apiHost string) context.Context {
+ return context.WithValue(ctx, apiHostOverrideKey, apiHost)
+}
+
+var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT")
+
+func withAPIPortOverride(ctx context.Context, apiPort string) context.Context {
+ return context.WithValue(ctx, apiPortOverrideKey, apiPort)
}
var namespaceKey = "holds the namespace string"
-func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
- return netcontext.WithValue(ctx, &namespaceKey, ns)
+func withNamespace(ctx context.Context, ns string) context.Context {
+ return context.WithValue(ctx, &namespaceKey, ns)
}
-func NamespaceFromContext(ctx netcontext.Context) string {
+func NamespaceFromContext(ctx context.Context) string {
// If there's no namespace, return the empty string.
ns, _ := ctx.Value(&namespaceKey).(string)
return ns
@@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string {
// FullyQualifiedAppID returns the fully-qualified application ID.
// This may contain a partition prefix (e.g. "s~" for High Replication apps),
// or a domain prefix (e.g. "example.com:").
-func FullyQualifiedAppID(ctx netcontext.Context) string {
+func FullyQualifiedAppID(ctx context.Context) string {
if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
return id
}
return fullyQualifiedAppID(ctx)
}
-func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+func Logf(ctx context.Context, level int64, format string, args ...interface{}) {
if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
f(level, format, args...)
return
@@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{
}
// NamespacedContext wraps a Context to support namespaces.
-func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
+func NamespacedContext(ctx context.Context, namespace string) context.Context {
return withNamespace(ctx, namespace)
}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
index 9b4134e42..0f95aa91d 100644
--- a/vendor/google.golang.org/appengine/internal/identity.go
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -5,9 +5,8 @@
package internal
import (
+ "context"
"os"
-
- netcontext "golang.org/x/net/context"
)
var (
@@ -23,7 +22,7 @@ var (
// AppID is the implementation of the wrapper function of the same name in
// ../identity.go. See that file for commentary.
-func AppID(c netcontext.Context) string {
+func AppID(c context.Context) string {
return appID(FullyQualifiedAppID(c))
}
@@ -35,7 +34,7 @@ func IsStandard() bool {
return appengineStandard || IsSecondGen()
}
-// IsStandard is the implementation of the wrapper function of the same name in
+// IsSecondGen is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsSecondGen() bool {
// Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
index 4e979f45e..5ad3548bf 100644
--- a/vendor/google.golang.org/appengine/internal/identity_classic.go
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -2,21 +2,22 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
+//go:build appengine
// +build appengine
package internal
import (
- "appengine"
+ "context"
- netcontext "golang.org/x/net/context"
+ "appengine"
)
func init() {
appengineStandard = true
}
-func DefaultVersionHostname(ctx netcontext.Context) string {
+func DefaultVersionHostname(ctx context.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
@@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string {
return appengine.DefaultVersionHostname(c)
}
-func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
-func ServerSoftware() string { return appengine.ServerSoftware() }
-func InstanceID() string { return appengine.InstanceID() }
-func IsDevAppServer() bool { return appengine.IsDevAppServer() }
+func Datacenter(_ context.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string { return appengine.ServerSoftware() }
+func InstanceID() string { return appengine.InstanceID() }
+func IsDevAppServer() bool { return appengine.IsDevAppServer() }
-func RequestID(ctx netcontext.Context) string {
+func RequestID(ctx context.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
@@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string {
return appengine.RequestID(c)
}
-func ModuleName(ctx netcontext.Context) string {
+func ModuleName(ctx context.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.ModuleName(c)
}
-func VersionID(ctx netcontext.Context) string {
+func VersionID(ctx context.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
@@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string {
return appengine.VersionID(c)
}
-func fullyQualifiedAppID(ctx netcontext.Context) string {
+func fullyQualifiedAppID(ctx context.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go
index d5e2e7b5e..4201b6b58 100644
--- a/vendor/google.golang.org/appengine/internal/identity_flex.go
+++ b/vendor/google.golang.org/appengine/internal/identity_flex.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
+//go:build appenginevm
// +build appenginevm
package internal
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
index 5d8067263..18ddda3a4 100644
--- a/vendor/google.golang.org/appengine/internal/identity_vm.go
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -2,17 +2,17 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
+//go:build !appengine
// +build !appengine
package internal
import (
+ "context"
"log"
"net/http"
"os"
"strings"
-
- netcontext "golang.org/x/net/context"
)
// These functions are implementations of the wrapper functions
@@ -24,7 +24,7 @@ const (
hDatacenter = "X-AppEngine-Datacenter"
)
-func ctxHeaders(ctx netcontext.Context) http.Header {
+func ctxHeaders(ctx context.Context) http.Header {
c := fromContext(ctx)
if c == nil {
return nil
@@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header {
return c.Request().Header
}
-func DefaultVersionHostname(ctx netcontext.Context) string {
+func DefaultVersionHostname(ctx context.Context) string {
return ctxHeaders(ctx).Get(hDefaultVersionHostname)
}
-func RequestID(ctx netcontext.Context) string {
+func RequestID(ctx context.Context) string {
return ctxHeaders(ctx).Get(hRequestLogId)
}
-func Datacenter(ctx netcontext.Context) string {
+func Datacenter(ctx context.Context) string {
if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
return dc
}
@@ -71,7 +71,7 @@ func ServerSoftware() string {
// TODO(dsymonds): Remove the metadata fetches.
-func ModuleName(_ netcontext.Context) string {
+func ModuleName(_ context.Context) string {
if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
return s
}
@@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string {
return string(mustGetMetadata("instance/attributes/gae_backend_name"))
}
-func VersionID(_ netcontext.Context) string {
+func VersionID(_ context.Context) string {
if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
return s1 + "." + s2
}
@@ -112,7 +112,7 @@ func partitionlessAppID() string {
return string(mustGetMetadata("instance/attributes/gae_project"))
}
-func fullyQualifiedAppID(_ netcontext.Context) string {
+func fullyQualifiedAppID(_ context.Context) string {
if s := os.Getenv("GAE_APPLICATION"); s != "" {
return s
}
@@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string {
}
func IsDevAppServer() bool {
- return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+ return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev"
}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
index 1e765312f..afd0ae84f 100644
--- a/vendor/google.golang.org/appengine/internal/main.go
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
+//go:build appengine
// +build appengine
package internal
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
index ddb79a333..86a8caf06 100644
--- a/vendor/google.golang.org/appengine/internal/main_vm.go
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
+//go:build !appengine
// +build !appengine
package internal
@@ -29,7 +30,7 @@ func Main() {
if IsDevAppServer() {
host = "127.0.0.1"
}
- if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+ if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil {
log.Fatalf("http.ListenAndServe: %v", err)
}
}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
index 9006ae653..2ae8ab9fa 100644
--- a/vendor/google.golang.org/appengine/internal/transaction.go
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -7,11 +7,11 @@ package internal
// This file implements hooks for applying datastore transactions.
import (
+ "context"
"errors"
"reflect"
"github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
basepb "google.golang.org/appengine/internal/base"
pb "google.golang.org/appengine/internal/datastore"
@@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) {
var transactionKey = "used for *Transaction"
-func transactionFromContext(ctx netcontext.Context) *transaction {
+func transactionFromContext(ctx context.Context) *transaction {
t, _ := ctx.Value(&transactionKey).(*transaction)
return t
}
-func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
- return netcontext.WithValue(ctx, &transactionKey, t)
+func withTransaction(ctx context.Context, t *transaction) context.Context {
+ return context.WithValue(ctx, &transactionKey, t)
}
type transaction struct {
@@ -54,7 +54,7 @@ type transaction struct {
var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
-func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
+func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
if transactionFromContext(c) != nil {
return nil, errors.New("nested transactions are not supported")
}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
index 21860ca08..6f169be48 100644
--- a/vendor/google.golang.org/appengine/namespace.go
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -5,11 +5,10 @@
package appengine
import (
+ "context"
"fmt"
"regexp"
- "golang.org/x/net/context"
-
"google.golang.org/appengine/internal"
)
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
index 05642a992..fcf3ad0a5 100644
--- a/vendor/google.golang.org/appengine/timeout.go
+++ b/vendor/google.golang.org/appengine/timeout.go
@@ -4,7 +4,7 @@
package appengine
-import "golang.org/x/net/context"
+import "context"
// IsTimeoutError reports whether err is a timeout error.
func IsTimeoutError(err error) bool {
diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh
deleted file mode 100644
index 785b62f46..000000000
--- a/vendor/google.golang.org/appengine/travis_install.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-set -e
-
-if [[ $GO111MODULE == "on" ]]; then
- go get .
-else
- go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine)
-fi
-
-if [[ $GOAPP == "true" ]]; then
- mkdir /tmp/sdk
- curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip"
- unzip -q /tmp/sdk.zip -d /tmp/sdk
- # NOTE: Set the following env vars in the test script:
- # export PATH="$PATH:/tmp/sdk/go_appengine"
- # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py
-fi
-
diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh
deleted file mode 100644
index d4390f045..000000000
--- a/vendor/google.golang.org/appengine/travis_test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -e
-
-go version
-go test -v google.golang.org/appengine/...
-go test -v -race google.golang.org/appengine/...
-if [[ $GOAPP == "true" ]]; then
- export PATH="$PATH:/tmp/sdk/go_appengine"
- export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py
- goapp version
- goapp test -v google.golang.org/appengine/...
-fi
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
index 6ffe1e6d9..6c0d72418 100644
--- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -7,6 +7,7 @@
package urlfetch // import "google.golang.org/appengine/urlfetch"
import (
+ "context"
"errors"
"fmt"
"io"
@@ -18,7 +19,6 @@ import (
"time"
"github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/urlfetch"
@@ -44,11 +44,10 @@ type Transport struct {
var _ http.RoundTripper = (*Transport)(nil)
// Client returns an *http.Client using a default urlfetch Transport. This
-// client will have the default deadline of 5 seconds, and will check the
-// validity of SSL certificates.
+// client will check the validity of SSL certificates.
//
-// Any deadline of the provided context will be used for requests through this client;
-// if the client does not have a deadline then a 5 second default is used.
+// Any deadline of the provided context will be used for requests through this client.
+// If the client does not have a deadline, then an App Engine default of 60 seconds is used.
func Client(ctx context.Context) *http.Client {
return &http.Client{
Transport: &Transport{
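
Note on the urlfetch.Client comment change above: the fixed 5-second default is gone, and only the context deadline (or the App Engine-side default) bounds a request. A minimal sketch of setting an explicit deadline, assuming a first-generation App Engine handler; the handler name and target URL are illustrative:

    package main

    import (
        "context"
        "net/http"
        "time"

        "google.golang.org/appengine"
        "google.golang.org/appengine/urlfetch"
    )

    func fetchHandler(w http.ResponseWriter, r *http.Request) {
        // The deadline on ctx bounds the fetch; without one, the App Engine
        // default described above applies.
        ctx, cancel := context.WithTimeout(appengine.NewContext(r), 10*time.Second)
        defer cancel()

        resp, err := urlfetch.Client(ctx).Get("https://example.com/")
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadGateway)
            return
        }
        defer resp.Body.Close()
        w.WriteHeader(resp.StatusCode)
    }

    func main() {
        http.HandleFunc("/fetch", fetchHandler)
        appengine.Main()
    }
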
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
index dbe2e2d0c..6ce01ac9a 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v3.21.12
// source: google/api/field_behavior.proto
package annotations
@@ -78,6 +78,19 @@ const (
// a non-empty value will be returned. The user will not be aware of what
// non-empty value to expect.
FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7
+ // Denotes that the field in a resource (a message annotated with
+ // google.api.resource) is used in the resource name to uniquely identify the
+ // resource. For AIP-compliant APIs, this should only be applied to the
+ // `name` field on the resource.
+ //
+ // This behavior should not be applied to references to other resources within
+ // the message.
+ //
+ // The identifier field of resources often has different field behavior
+ // depending on the request it is embedded in (e.g. for Create methods name
+ // is optional and unused, while for Update methods it is required). Instead
+ // of method-specific annotations, only `IDENTIFIER` is required.
+ FieldBehavior_IDENTIFIER FieldBehavior = 8
)
// Enum value maps for FieldBehavior.
@@ -91,6 +104,7 @@ var (
5: "IMMUTABLE",
6: "UNORDERED_LIST",
7: "NON_EMPTY_DEFAULT",
+ 8: "IDENTIFIER",
}
FieldBehavior_value = map[string]int32{
"FIELD_BEHAVIOR_UNSPECIFIED": 0,
@@ -101,6 +115,7 @@ var (
"IMMUTABLE": 5,
"UNORDERED_LIST": 6,
"NON_EMPTY_DEFAULT": 7,
+ "IDENTIFIER": 8,
}
)
@@ -169,7 +184,7 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{
0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64,
0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a,
- 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
+ 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56,
0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
@@ -179,7 +194,8 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{
0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a,
0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10,
0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44,
- 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
+ 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e,
+ 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e,
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 1bc92248c..ab0fbb79b 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -1,8 +1,8 @@
# gRPC-Go
-[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
+[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go)
The [Go][] implementation of [gRPC][]: A high performance, open source, general
RPC framework that puts mobile and HTTP/2 first. For more information see the
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
index 712fef4d0..52d530d7a 100644
--- a/vendor/google.golang.org/grpc/attributes/attributes.go
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -121,9 +121,9 @@ func (a *Attributes) String() string {
return sb.String()
}
-func str(x any) string {
+func str(x any) (s string) {
if v, ok := x.(fmt.Stringer); ok {
- return v.String()
+ return fmt.Sprint(v)
} else if v, ok := x.(string); ok {
return v
}
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index b6377f445..d79560a2e 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -30,6 +30,7 @@ import (
"google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
@@ -39,6 +40,8 @@ import (
var (
// m is a map from name to balancer builder.
m = make(map[string]Builder)
+
+ logger = grpclog.Component("balancer")
)
// Register registers the balancer builder to the balancer map. b.Name
@@ -51,6 +54,12 @@ var (
// an init() function), and is not thread-safe. If multiple Balancers are
// registered with the same name, the one registered last will take effect.
func Register(b Builder) {
+ if strings.ToLower(b.Name()) != b.Name() {
+ // TODO: Skip the use of strings.ToLower() to index the map after v1.59
+ // is released to switch to case sensitive balancer registry. Also,
+ // remove this warning and update the docstrings for Register and Get.
+ logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name())
+ }
m[strings.ToLower(b.Name())] = b
}
@@ -70,6 +79,12 @@ func init() {
// Note that the compare is done in a case-insensitive fashion.
// If no builder is register with the name, nil will be returned.
func Get(name string) Builder {
+ if strings.ToLower(name) != name {
+ // TODO: Skip the use of strings.ToLower() to index the map after v1.59
+ // is released to switch to case sensitive balancer registry. Also,
+ // remove this warning and update the docstrings for Register and Get.
+ logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name)
+ }
if b, ok := m[strings.ToLower(name)]; ok {
return b
}
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index ff7fea102..429c389e4 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -337,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error {
return errConnClosing
}
if cc.idlenessState != ccIdlenessStateIdle {
- cc.mu.Unlock()
channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState)
+ cc.mu.Unlock()
return nil
}
@@ -404,13 +404,13 @@ func (cc *ClientConn) exitIdleMode() error {
// name resolver, load balancer and any subchannels.
func (cc *ClientConn) enterIdleMode() error {
cc.mu.Lock()
+ defer cc.mu.Unlock()
+
if cc.conns == nil {
- cc.mu.Unlock()
return ErrClientConnClosing
}
if cc.idlenessState != ccIdlenessStateActive {
- channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
- cc.mu.Unlock()
+ channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
return nil
}
@@ -431,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error {
cc.balancerWrapper.enterIdleMode()
cc.csMgr.updateState(connectivity.Idle)
cc.idlenessState = ccIdlenessStateIdle
- cc.mu.Unlock()
+ cc.addTraceEvent("entering idle mode")
go func() {
- cc.addTraceEvent("entering idle mode")
for ac := range conns {
ac.tearDown(errConnIdling)
}
}()
+
return nil
}
@@ -804,6 +804,12 @@ func init() {
internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
return cc.csMgr.pubSub.Subscribe(s)
}
+ internal.EnterIdleModeForTesting = func(cc *ClientConn) error {
+ return cc.enterIdleMode()
+ }
+ internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
+ return cc.exitIdleMode()
+ }
}
func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 1fd0d5c12..cfc9fd85e 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -644,6 +644,7 @@ func defaultDialOptions() dialOptions {
UseProxy: true,
},
recvBufferPool: nopBufferPool{},
+ idleTimeout: 30 * time.Minute,
}
}
@@ -680,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
// channel will exit idle mode when the Connect() method is called or when an
// RPC is initiated.
//
-// By default this feature is disabled, which can also be explicitly configured
-// by passing zero to this function.
+// A default timeout of 30 minutes will be used if this dial option is not set
+// at dial time and idleness can be disabled by passing a timeout of zero.
//
// # Experimental
//
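
Note on the WithIdleTimeout change above: together with the defaultDialOptions hunk, channel idleness is now on by default with a 30-minute timeout, and passing zero disables it. A minimal sketch of opting out; the target address and insecure credentials are placeholders:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // Passing zero restores the old behavior (idleness disabled); omitting
        // the option now gets the 30-minute default described above.
        conn, err := grpc.Dial(
            "dns:///example.invalid:50051", // placeholder target
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithIdleTimeout(0),
        )
        if err != nil {
            log.Fatalf("grpc.Dial: %v", err)
        }
        defer conn.Close()
    }
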
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 69d5580b6..5ebf88d71 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -38,6 +38,10 @@ const Identity = "identity"
// Compressor is used for compressing and decompressing when sending or
// receiving messages.
+//
+// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`,
+// gRPC will invoke it to determine the size of the buffer allocated for the
+// result of decompression. A return value of -1 indicates unknown size.
type Compressor interface {
// Compress writes the data written to wc to w after compressing it. If an
// error occurs while initializing the compressor, that error is returned
@@ -51,15 +55,6 @@ type Compressor interface {
// coding header. The result must be static; the result cannot change
// between calls.
Name() string
- // If a Compressor implements
- // DecompressedSize(compressedBytes []byte) int, gRPC will call it
- // to determine the size of the buffer allocated for the result of decompression.
- // Return -1 to indicate unknown size.
- //
- // Experimental
- //
- // Notice: This API is EXPERIMENTAL and may be changed or removed in a
- // later release.
}
var registeredCompressor = make(map[string]Compressor)
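
Note on the relocated Compressor comment above: DecompressedSize is an optional method that gRPC checks for at runtime to size the decompression buffer. A sketch of a gzip-backed compressor that reports the size from the gzip trailer, loosely mirroring grpc's own encoding/gzip package; the type and name here are illustrative:

    package gzipexample

    import (
        "compress/gzip"
        "encoding/binary"
        "io"

        "google.golang.org/grpc/encoding"
    )

    // gzipCompressor is an illustrative Compressor; grpc ships its own in
    // google.golang.org/grpc/encoding/gzip, which this loosely follows.
    type gzipCompressor struct{}

    var _ encoding.Compressor = gzipCompressor{}

    func (gzipCompressor) Name() string { return "gzip-example" }

    func (gzipCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
        return gzip.NewWriter(w), nil
    }

    func (gzipCompressor) Decompress(r io.Reader) (io.Reader, error) {
        return gzip.NewReader(r)
    }

    // DecompressedSize reports the buffer size gRPC should allocate for the
    // decompressed message; the gzip trailer stores the uncompressed length
    // (mod 2^32) in its last four bytes. Return -1 when the size is unknown.
    func (gzipCompressor) DecompressedSize(compressed []byte) int {
        if len(compressed) < 4 {
            return -1
        }
        return int(binary.LittleEndian.Uint32(compressed[len(compressed)-4:]))
    }
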
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index a01a1b4d5..4439cda0f 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -44,8 +44,15 @@ const (
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type HealthClient interface {
- // If the requested service is unknown, the call will fail with status
- // NOT_FOUND.
+ // Check gets the health of the specified service. If the requested service
+ // is unknown, the call will fail with status NOT_FOUND. If the caller does
+ // not specify a service name, the server should respond with its overall
+ // health status.
+ //
+ // Clients should set a deadline when calling Check, and can declare the
+ // server unhealthy if they do not receive a timely response.
+ //
+ // Check implementations should be idempotent and side effect free.
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
@@ -118,8 +125,15 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
// All implementations should embed UnimplementedHealthServer
// for forward compatibility
type HealthServer interface {
- // If the requested service is unknown, the call will fail with status
- // NOT_FOUND.
+ // Check gets the health of the specified service. If the requested service
+ // is unknown, the call will fail with status NOT_FOUND. If the caller does
+ // not specify a service name, the server should respond with its overall
+ // health status.
+ //
+ // Clients should set a deadline when calling Check, and can declare the
+ // server unhealthy if they do not receive a timely response.
+ //
+ // Check implementations should be idempotent and side effect free.
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
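
Note on the expanded Check documentation above: callers are advised to set a deadline and treat a slow response as unhealthy. A minimal client-side sketch; the two-second timeout is arbitrary and conn is assumed to be an already-dialed ClientConn:

    package healthexample

    import (
        "context"
        "time"

        "google.golang.org/grpc"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    // isServing checks overall server health (empty service name) with a short
    // deadline, per the guidance above.
    func isServing(conn *grpc.ClientConn) bool {
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()

        resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
        if err != nil {
            return false // a timeout or NOT_FOUND both count as unhealthy here
        }
        return resp.GetStatus() == healthpb.HealthCheckResponse_SERVING
    }
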
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index 5fc0ee3da..fed1c011a 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -23,6 +23,8 @@
package backoff
import (
+ "context"
+ "errors"
"time"
grpcbackoff "google.golang.org/grpc/backoff"
@@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration {
}
return time.Duration(backoff)
}
+
+// ErrResetBackoff is the error to be returned by the function executed by RunF,
+// to instruct the latter to reset its backoff state.
+var ErrResetBackoff = errors.New("reset backoff state")
+
+// RunF provides a convenient way to run a function f repeatedly until the
+// context expires or f returns a non-nil error that is not ErrResetBackoff.
+// When f returns ErrResetBackoff, RunF continues to run f, but resets its
+// backoff state before doing so. backoff accepts an integer representing the
+// number of retries, and returns the amount of time to backoff.
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) {
+ attempt := 0
+ timer := time.NewTimer(0)
+ for ctx.Err() == nil {
+ select {
+ case <-timer.C:
+ case <-ctx.Done():
+ timer.Stop()
+ return
+ }
+
+ err := f()
+ if errors.Is(err, ErrResetBackoff) {
+ timer.Reset(0)
+ attempt = 0
+ continue
+ }
+ if err != nil {
+ return
+ }
+ timer.Reset(backoff(attempt))
+ attempt++
+ }
+}
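
Note on the new RunF helper above: f is retried until the context ends or f returns a non-nil error other than ErrResetBackoff, which instead resets the backoff state. A sketch of how an in-tree caller might drive it (the internal/backoff package is not importable from user code); doWork and shouldReset are hypothetical, and DefaultExponential is assumed to be the package's existing stock policy:

    package example

    import (
        "context"
        "time"

        "google.golang.org/grpc/internal/backoff"
    )

    func runWithBackoff(ctx context.Context, doWork func() error, shouldReset func(error) bool) {
        ctx, cancel := context.WithTimeout(ctx, time.Minute)
        defer cancel()

        backoff.RunF(ctx, func() error {
            err := doWork()
            switch {
            case err == nil:
                return nil // run doWork again after the next backoff interval
            case shouldReset(err):
                return backoff.ErrResetBackoff // keep going, but reset the backoff state
            default:
                return err // any other error stops RunF
            }
        }, backoff.DefaultExponential.Backoff)
    }
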
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index c8a8c76d6..0d94c63e0 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -175,6 +175,12 @@ var (
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
// metadata to RPCs.
GRPCResolverSchemeExtraMetadata string = "xds"
+
+ // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
+ EnterIdleModeForTesting any // func(*grpc.ClientConn) error
+
+ // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
+ ExitIdleModeForTesting any // func(*grpc.ClientConn) error
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 4cf85cad9..03ef2fedd 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -43,6 +43,34 @@ type Status struct {
s *spb.Status
}
+// NewWithProto returns a new status including details from statusProto. This
+// is meant to be used by the gRPC library only.
+func NewWithProto(code codes.Code, message string, statusProto []string) *Status {
+ if len(statusProto) != 1 {
+ // No grpc-status-details bin header, or multiple; just ignore.
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+ }
+ st := &spb.Status{}
+ if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil {
+ // Probably not a google.rpc.Status proto; do not provide details.
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+ }
+ if st.Code == int32(code) {
+ // The codes match between the grpc-status header and the
+ // grpc-status-details-bin header; use the full details proto.
+ return &Status{s: st}
+ }
+ return &Status{
+ s: &spb.Status{
+ Code: int32(codes.Internal),
+ Message: fmt.Sprintf(
+ "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v",
+ code, message, st,
+ ),
+ },
+ }
+}
+
// New returns a Status representing c and msg.
func New(c codes.Code, msg string) *Status {
return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 98f80e3fa..17f7a21b5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
h.Set("Grpc-Message", encodeGrpcMessage(m))
}
+ s.hdrMu.Lock()
if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
panic(err)
}
- h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
+ h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes))
}
- if md := s.Trailer(); len(md) > 0 {
- for k, vv := range md {
+ if len(s.trailer) > 0 {
+ for k, vv := range s.trailer {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
if isReservedHeader(k) {
continue
@@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
}
}
+ s.hdrMu.Unlock()
})
if err == nil { // transport has not been closed
@@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
}
// writeCustomHeaders sets custom headers set on the stream via SetHeader
-// on the first write call (Write, WriteHeader, or WriteStatus).
+// on the first write call (Write, WriteHeader, or WriteStatus)
func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
h := ht.rw.Header()
@@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
return err
}
-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
ctx := ht.req.Context()
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index badab8acf..d6f5c4935 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
mdata = make(map[string][]string)
contentTypeErr = "malformed header: missing HTTP content-type"
grpcMessage string
- statusGen *status.Status
recvCompress string
httpStatusCode *int
httpStatusErr string
@@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
rawStatusCode = codes.Code(uint32(code))
case "grpc-message":
grpcMessage = decodeGrpcMessage(hf.Value)
- case "grpc-status-details-bin":
- var err error
- statusGen, err = decodeGRPCStatusDetails(hf.Value)
- if err != nil {
- headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
- }
case ":status":
if hf.Value == "200" {
httpStatusErr = ""
@@ -1548,14 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return
}
- if statusGen == nil {
- statusGen = status.New(rawStatusCode, grpcMessage)
- }
+ status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
// If client received END_STREAM from server while stream was still active,
// send RST_STREAM.
rstStream := s.getState() == streamActive
- t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true)
+ t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true)
}
// readServerPreface reads and handles the initial settings frame from the
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index c06db679d..6fa1eb419 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -342,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
// operateHeaders takes action on the decoded headers. Returns an error if fatal
// error encountered and transport needs to close, otherwise returns nil.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
// Acquire max stream ID lock for entire duration
t.maxStreamMu.Lock()
defer t.maxStreamMu.Unlock()
@@ -561,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
if t.inTapHandle != nil {
var err error
- if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
+ if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil {
t.mu.Unlock()
if t.logger.V(logLevel) {
t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
@@ -592,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
- s.ctx = traceCtx(s.ctx, s.method)
for _, sh := range t.stats {
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
inHeader := &stats.InHeader{
@@ -630,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (t *http2Server) HandleStreams(handle func(*Stream)) {
defer close(t.readerDone)
for {
t.controlBuf.throttle()
@@ -665,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
- if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
+ if err := t.operateHeaders(frame, handle); err != nil {
t.Close(err)
break
}
@@ -1053,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ // Do not use the user's grpc-status-details-bin (if present) if we are
+ // even attempting to set our own.
+ delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
} else {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)})
}
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 195814008..dc29d590e 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -34,12 +34,9 @@ import (
"time"
"unicode/utf8"
- "github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
- spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
)
const (
@@ -88,6 +85,8 @@ var (
}
)
+var grpcStatusDetailsBinHeader = "grpc-status-details-bin"
+
// isReservedHeader checks whether hdr belongs to HTTP2 headers
// reserved by gRPC protocol. Any other headers are classified as the
// user-specified metadata.
@@ -103,7 +102,6 @@ func isReservedHeader(hdr string) bool {
"grpc-message",
"grpc-status",
"grpc-timeout",
- "grpc-status-details-bin",
// Intentionally exclude grpc-previous-rpc-attempts and
// grpc-retry-pushback-ms, which are "reserved", but their API
// intentionally works via metadata.
@@ -154,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) {
return v, nil
}
-func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
- v, err := decodeBinHeader(rawDetails)
- if err != nil {
- return nil, err
- }
- st := &spb.Status{}
- if err = proto.Unmarshal(v, st); err != nil {
- return nil, err
- }
- return status.FromProto(st), nil
-}
-
type timeoutUnit uint8
const (
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 74a811fc0..aac056e72 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -698,7 +698,7 @@ type ClientTransport interface {
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
// HandleStreams receives incoming streams using the given handler.
- HandleStreams(func(*Stream), func(context.Context, string) context.Context)
+ HandleStreams(func(*Stream))
// WriteHeader sends the header metadata for the given stream.
// WriteHeader may not be called on all streams.
diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go
index e6b0f14cd..0a4262342 100644
--- a/vendor/google.golang.org/grpc/resolver/manual/manual.go
+++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go
@@ -26,7 +26,9 @@ import (
"google.golang.org/grpc/resolver"
)
-// NewBuilderWithScheme creates a new test resolver builder with the given scheme.
+// NewBuilderWithScheme creates a new manual resolver builder with the given
+// scheme. Every instance of the manual resolver may only ever be used with a
+// single grpc.ClientConn. Otherwise, bad things will happen.
func NewBuilderWithScheme(scheme string) *Resolver {
return &Resolver{
BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {},
@@ -58,30 +60,34 @@ type Resolver struct {
scheme string
// Fields actually belong to the resolver.
- mu sync.Mutex // Guards access to CC.
- CC resolver.ClientConn
- bootstrapState *resolver.State
+ // Guards access to below fields.
+ mu sync.Mutex
+ CC resolver.ClientConn
+ // Storing the most recent state update makes this resolver resilient to
+ // restarts, which is possible with channel idleness.
+ lastSeenState *resolver.State
}
// InitialState adds initial state to the resolver so that UpdateState doesn't
// need to be explicitly called after Dial.
func (r *Resolver) InitialState(s resolver.State) {
- r.bootstrapState = &s
+ r.lastSeenState = &s
}
// Build returns itself for Resolver, because it's both a builder and a resolver.
func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+ r.BuildCallback(target, cc, opts)
r.mu.Lock()
r.CC = cc
- r.mu.Unlock()
- r.BuildCallback(target, cc, opts)
- if r.bootstrapState != nil {
- r.UpdateState(*r.bootstrapState)
+ if r.lastSeenState != nil {
+ err := r.CC.UpdateState(*r.lastSeenState)
+ go r.UpdateStateCallback(err)
}
+ r.mu.Unlock()
return r, nil
}
-// Scheme returns the test scheme.
+// Scheme returns the manual resolver's scheme.
func (r *Resolver) Scheme() string {
return r.scheme
}
@@ -100,6 +106,7 @@ func (r *Resolver) Close() {
func (r *Resolver) UpdateState(s resolver.State) {
r.mu.Lock()
err := r.CC.UpdateState(s)
+ r.lastSeenState = &s
r.mu.Unlock()
r.UpdateStateCallback(err)
}
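
Note on the manual resolver changes above: the resolver now remembers the last reported state so a resolver rebuilt after channel idleness can replay it, and the comment now warns that each instance may serve only one ClientConn. A minimal wiring sketch; the scheme and backend address are placeholders:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        "google.golang.org/grpc/resolver"
        "google.golang.org/grpc/resolver/manual"
    )

    func main() {
        // One manual resolver per ClientConn, as the updated comment requires.
        r := manual.NewBuilderWithScheme("example") // scheme is illustrative
        r.InitialState(resolver.State{
            Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}, // placeholder backend
        })

        conn, err := grpc.Dial(
            r.Scheme()+":///unused", // the manual resolver does not use the endpoint
            grpc.WithResolvers(r),
            grpc.WithTransportCredentials(insecure.NewCredentials()),
        )
        if err != nil {
            log.Fatalf("grpc.Dial: %v", err)
        }
        defer conn.Close()

        // Later r.UpdateState calls (typically from tests, once the channel is
        // running) are now remembered, so a resolver restarted after channel
        // idleness still reports the most recent address list.
    }
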
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index eeae92fbe..8f60d4214 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -983,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
f := func() {
defer streamQuota.release()
defer wg.Done()
- s.handleStream(st, stream, s.traceInfo(st, stream))
+ s.handleStream(st, stream)
}
if s.opts.numServerWorkers > 0 {
@@ -995,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
}
}
go f()
- }, func(ctx context.Context, method string) context.Context {
- if !EnableTracing {
- return ctx
- }
- tr := trace.New("grpc.Recv."+methodFamily(method), method)
- return trace.NewContext(ctx, tr)
})
wg.Wait()
}
@@ -1049,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
s.serveStreams(st)
}
-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
-// If tracing is not enabled, it returns nil.
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
- if !EnableTracing {
- return nil
- }
- tr, ok := trace.FromContext(stream.Context())
- if !ok {
- return nil
- }
-
- trInfo = &traceInfo{
- tr: tr,
- firstLine: firstLine{
- client: false,
- remoteAddr: st.RemoteAddr(),
- },
- }
- if dl, ok := stream.Context().Deadline(); ok {
- trInfo.firstLine.deadline = time.Until(dl)
- }
- return trInfo
-}
-
func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
s.mu.Lock()
defer s.mu.Unlock()
@@ -1133,7 +1103,7 @@ func (s *Server) incrCallsFailed() {
atomic.AddInt64(&s.czData.callsFailed, 1)
}
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
if err != nil {
channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
@@ -1152,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
err = t.Write(stream, hdr, payload, opts)
if err == nil {
for _, sh := range s.opts.statsHandlers {
- sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
+ sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
}
}
return err
@@ -1194,7 +1164,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
}
}
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
shs := s.opts.statsHandlers
if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
if channelz.IsOn() {
@@ -1208,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
IsClientStream: false,
IsServerStream: false,
}
- sh.HandleRPC(stream.Context(), statsBegin)
+ sh.HandleRPC(ctx, statsBegin)
}
if trInfo != nil {
trInfo.tr.LazyLog(&trInfo.firstLine, false)
@@ -1240,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
}
- sh.HandleRPC(stream.Context(), end)
+ sh.HandleRPC(ctx, end)
}
if channelz.IsOn() {
@@ -1262,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
}
if len(binlogs) != 0 {
- ctx := stream.Context()
md, _ := metadata.FromIncomingContext(ctx)
logEntry := &binarylog.ClientHeader{
Header: md,
@@ -1348,7 +1317,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
for _, sh := range shs {
- sh.HandleRPC(stream.Context(), &stats.InPayload{
+ sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
Length: len(d),
@@ -1362,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Message: d,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), cm)
+ binlog.Log(ctx, cm)
}
}
if trInfo != nil {
@@ -1370,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
return nil
}
- ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+ ctx = NewContextWithServerTransportStream(ctx, stream)
reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
if appErr != nil {
appStatus, ok := status.FromError(appErr)
@@ -1395,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Header: h,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), sh)
+ binlog.Log(ctx, sh)
}
}
st := &binarylog.ServerTrailer{
@@ -1403,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Err: appErr,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), st)
+ binlog.Log(ctx, st)
}
}
return appErr
@@ -1418,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if stream.SendCompress() != sendCompressorName {
comp = encoding.GetCompressor(stream.SendCompress())
}
- if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
+ if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil {
if err == io.EOF {
// The entire stream is done (for unary RPC only).
return err
@@ -1445,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Err: appErr,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), sh)
- binlog.Log(stream.Context(), st)
+ binlog.Log(ctx, sh)
+ binlog.Log(ctx, st)
}
}
return err
@@ -1460,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Message: reply,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), sh)
- binlog.Log(stream.Context(), sm)
+ binlog.Log(ctx, sh)
+ binlog.Log(ctx, sm)
}
}
if channelz.IsOn() {
@@ -1479,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Err: appErr,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), st)
+ binlog.Log(ctx, st)
}
}
return t.WriteStatus(stream, statusOK)
@@ -1521,7 +1490,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf
}
}
-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
if channelz.IsOn() {
s.incrCallsStarted()
}
@@ -1535,10 +1504,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
IsServerStream: sd.ServerStreams,
}
for _, sh := range shs {
- sh.HandleRPC(stream.Context(), statsBegin)
+ sh.HandleRPC(ctx, statsBegin)
}
}
- ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+ ctx = NewContextWithServerTransportStream(ctx, stream)
ss := &serverStream{
ctx: ctx,
t: t,
@@ -1574,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
end.Error = toRPCErr(err)
}
for _, sh := range shs {
- sh.HandleRPC(stream.Context(), end)
+ sh.HandleRPC(ctx, end)
}
}
@@ -1616,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
logEntry.PeerAddr = peer.Addr
}
for _, binlog := range ss.binlogs {
- binlog.Log(stream.Context(), logEntry)
+ binlog.Log(ctx, logEntry)
}
}
@@ -1694,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
Err: appErr,
}
for _, binlog := range ss.binlogs {
- binlog.Log(stream.Context(), st)
+ binlog.Log(ctx, st)
}
}
t.WriteStatus(ss.s, appStatus)
@@ -1712,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
Err: appErr,
}
for _, binlog := range ss.binlogs {
- binlog.Log(stream.Context(), st)
+ binlog.Log(ctx, st)
}
}
return t.WriteStatus(ss.s, statusOK)
}
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
+ ctx := stream.Context()
+ var ti *traceInfo
+ if EnableTracing {
+ tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
+ ctx = trace.NewContext(ctx, tr)
+ ti = &traceInfo{
+ tr: tr,
+ firstLine: firstLine{
+ client: false,
+ remoteAddr: t.RemoteAddr(),
+ },
+ }
+ if dl, ok := ctx.Deadline(); ok {
+ ti.firstLine.deadline = time.Until(dl)
+ }
+ }
+
sm := stream.Method()
if sm != "" && sm[0] == '/' {
sm = sm[1:]
}
pos := strings.LastIndex(sm, "/")
if pos == -1 {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
- trInfo.tr.SetError()
+ if ti != nil {
+ ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
+ ti.tr.SetError()
}
errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
- trInfo.tr.SetError()
+ if ti != nil {
+ ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+ ti.tr.SetError()
}
channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
}
- if trInfo != nil {
- trInfo.tr.Finish()
+ if ti != nil {
+ ti.tr.Finish()
}
return
}
@@ -1748,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
srv, knownService := s.services[service]
if knownService {
if md, ok := srv.methods[method]; ok {
- s.processUnaryRPC(t, stream, srv, md, trInfo)
+ s.processUnaryRPC(ctx, t, stream, srv, md, ti)
return
}
if sd, ok := srv.streams[method]; ok {
- s.processStreamingRPC(t, stream, srv, sd, trInfo)
+ s.processStreamingRPC(ctx, t, stream, srv, sd, ti)
return
}
}
// Unknown service, or known server unknown method.
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
- s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+ s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti)
return
}
var errDesc string
@@ -1767,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
} else {
errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
}
- if trInfo != nil {
- trInfo.tr.LazyPrintf("%s", errDesc)
- trInfo.tr.SetError()
+ if ti != nil {
+ ti.tr.LazyPrintf("%s", errDesc)
+ ti.tr.SetError()
}
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
- trInfo.tr.SetError()
+ if ti != nil {
+ ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+ ti.tr.SetError()
}
channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
}
- if trInfo != nil {
- trInfo.tr.Finish()
+ if ti != nil {
+ ti.tr.Finish()
}
}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
index bfa5dfa40..07f012576 100644
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -27,6 +27,8 @@ package tap
import (
"context"
+
+ "google.golang.org/grpc/metadata"
)
// Info defines the relevant information needed by the handles.
@@ -34,6 +36,10 @@ type Info struct {
// FullMethodName is the string of grpc method (in the format of
// /package.service/method).
FullMethodName string
+
+ // Header contains the header metadata received.
+ Header metadata.MD
+
// TODO: More to be added.
}
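
Note on the new Info.Header field above: server tap handles now see the incoming header metadata before any message is read. A minimal sketch of an InTapHandle that uses it; the header name is made up:

    package main

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
        "google.golang.org/grpc/tap"
    )

    // rejectWithoutToken drops RPCs that lack a hypothetical "x-example-token"
    // header, using the new Info.Header field before any message is processed.
    func rejectWithoutToken(ctx context.Context, info *tap.Info) (context.Context, error) {
        if len(info.Header.Get("x-example-token")) == 0 {
            return nil, status.Error(codes.Unauthenticated, "missing x-example-token")
        }
        return ctx, nil
    }

    func main() {
        s := grpc.NewServer(grpc.InTapHandle(rejectWithoutToken))
        _ = s // register services and call s.Serve(lis) as usual
    }
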
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 724ad2102..6d2cadd79 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.58.3"
+const Version = "1.59.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index bbc9e2e3c..bb480f1f9 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -93,6 +93,9 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc
# - Ensure all ptypes proto packages are renamed when importing.
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
+# - Ensure all usages of grpc_testing package are renamed when importing.
+not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"
+
# - Ensure all xds proto imports are renamed to *pb or *grpc.
git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go
index 799d866d5..9887d185b 100644
--- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go
+++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go
@@ -214,9 +214,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) {
}
}
- if union.Discriminator != nil && len(union.Fields) == 0 {
- return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator)
- }
return union, nil
}
diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go
index e4e740cad..e0811e834 100644
--- a/vendor/k8s.io/utils/integer/integer.go
+++ b/vendor/k8s.io/utils/integer/integer.go
@@ -16,6 +16,8 @@ limitations under the License.
package integer
+import "math"
+
// IntMax returns the maximum of the params
func IntMax(a, b int) int {
if b > a {
@@ -65,9 +67,7 @@ func Int64Min(a, b int64) int64 {
}
// RoundToInt32 rounds floats into integer numbers.
+// Deprecated: use math.Round() and a cast directly.
func RoundToInt32(a float64) int32 {
- if a < 0 {
- return int32(a - 0.5)
- }
- return int32(a + 0.5)
+ return int32(math.Round(a))
}
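
The RoundToInt32 rewrite preserves the old half-away-from-zero behaviour, since math.Round also rounds halves away from zero: int32(math.Round(2.5)) == 3 and int32(math.Round(-2.5)) == -3, matching the previous +0.5/-0.5 arithmetic. A quick check (sketch, not part of the vendored code):

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	for _, v := range []float64{2.4, 2.5, -2.5, -2.6} {
    		old := int32(v + 0.5) // previous implementation
    		if v < 0 {
    			old = int32(v - 0.5)
    		}
    		fmt.Printf("%v old=%d new=%d\n", v, old, int32(math.Round(v)))
    		// 2.4 old=2 new=2, 2.5 old=3 new=3, -2.5 old=-3 new=-3, -2.6 old=-3 new=-3
    	}
    }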
diff --git a/vendor/kmodules.xyz/client-go/core/v1/pod_status.go b/vendor/kmodules.xyz/client-go/core/v1/pod_status.go
index eab9e7664..cf87120a9 100644
--- a/vendor/kmodules.xyz/client-go/core/v1/pod_status.go
+++ b/vendor/kmodules.xyz/client-go/core/v1/pod_status.go
@@ -17,6 +17,8 @@ limitations under the License.
package v1
import (
+ "fmt"
+
core "k8s.io/api/core/v1"
)
@@ -108,3 +110,127 @@ func UpsertPodReadinessGateConditionType(readinessGates []core.PodReadinessGate,
ConditionType: conditionType,
})
}
+
+const (
+ // NodeUnreachablePodReason is the reason on a pod when its state cannot be confirmed as kubelet is unresponsive
+ // on the node it is (was) running.
+ NodeUnreachablePodReason = "NodeLost"
+)
+
+// GetPodStatus returns pod status like kubectl
+// Adapted from: https://github.com/kubernetes/kubernetes/blob/735804dc812ce647f8c130dced45b5ba4079b76e/pkg/printers/internalversion/printers.go#L825
+func GetPodStatus(pod *core.Pod) string {
+ reason := string(pod.Status.Phase)
+ if pod.Status.Reason != "" {
+ reason = pod.Status.Reason
+ }
+
+ // If the Pod carries {type:PodScheduled, reason:WaitingForGates}, set reason to 'SchedulingGated'.
+ for _, condition := range pod.Status.Conditions {
+ if condition.Type == core.PodScheduled && condition.Reason == core.PodReasonSchedulingGated {
+ reason = core.PodReasonSchedulingGated
+ }
+ }
+
+ initContainers := make(map[string]*core.Container)
+ for i := range pod.Spec.InitContainers {
+ initContainers[pod.Spec.InitContainers[i].Name] = &pod.Spec.InitContainers[i]
+ }
+
+ initializing := false
+ for i := range pod.Status.InitContainerStatuses {
+ container := pod.Status.InitContainerStatuses[i]
+ switch {
+ case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
+ continue
+ case isRestartableInitContainer(initContainers[container.Name]) &&
+ container.Started != nil && *container.Started:
+ continue
+ case container.State.Terminated != nil:
+ // initialization is failed
+ if len(container.State.Terminated.Reason) == 0 {
+ if container.State.Terminated.Signal != 0 {
+ reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
+ } else {
+ reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
+ }
+ } else {
+ reason = "Init:" + container.State.Terminated.Reason
+ }
+ initializing = true
+ case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
+ reason = "Init:" + container.State.Waiting.Reason
+ initializing = true
+ default:
+ reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
+ initializing = true
+ }
+ break
+ }
+
+ if !initializing || isPodInitializedConditionTrue(&pod.Status) {
+ hasRunning := false
+ for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
+ container := pod.Status.ContainerStatuses[i]
+
+ if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
+ reason = container.State.Waiting.Reason
+ } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
+ reason = container.State.Terminated.Reason
+ } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
+ if container.State.Terminated.Signal != 0 {
+ reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
+ } else {
+ reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
+ }
+ } else if container.Ready && container.State.Running != nil {
+ hasRunning = true
+ }
+ }
+
+ // change pod status back to "Running" if there is at least one container still reporting as "Running" status
+ if reason == "Completed" && hasRunning {
+ if hasPodReadyCondition(pod.Status.Conditions) {
+ reason = "Running"
+ } else {
+ reason = "NotReady"
+ }
+ }
+ }
+
+ if pod.DeletionTimestamp != nil && pod.Status.Reason == NodeUnreachablePodReason {
+ reason = "Unknown"
+ } else if pod.DeletionTimestamp != nil {
+ reason = "Terminating"
+ }
+
+ return reason
+}
+
+func hasPodReadyCondition(conditions []core.PodCondition) bool {
+ for _, condition := range conditions {
+ if condition.Type == core.PodReady && condition.Status == core.ConditionTrue {
+ return true
+ }
+ }
+ return false
+}
+
+func isRestartableInitContainer(initContainer *core.Container) bool {
+ if initContainer.RestartPolicy == nil {
+ return false
+ }
+
+ return *initContainer.RestartPolicy == core.ContainerRestartPolicyAlways
+}
+
+func isPodInitializedConditionTrue(status *core.PodStatus) bool {
+ for _, condition := range status.Conditions {
+ if condition.Type != core.PodInitialized {
+ continue
+ }
+
+ return condition.Status == core.ConditionTrue
+ }
+ return false
+}
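
For reference, GetPodStatus reproduces the kubectl STATUS column, so callers can use it instead of re-deriving a status string from phase, conditions, and container states. A minimal usage sketch against a live cluster, assuming a kubeconfig-based client-go clientset and a core_util alias for kmodules.xyz/client-go/core/v1 (the alias and the kubeconfig wiring are assumptions, not part of this diff):

    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    	core_util "kmodules.xyz/client-go/core/v1"
    )

    func main() {
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	kc := kubernetes.NewForConfigOrDie(cfg)

    	pods, err := kc.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for i := range pods.Items {
    		p := pods.Items[i]
    		// Same string kubectl shows under STATUS, e.g. Running, Init:1/2, CrashLoopBackOff.
    		fmt.Printf("%s\t%s\n", p.Name, core_util.GetPodStatus(&p))
    	}
    }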
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4a7207439..8aa01ec76 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -4,8 +4,8 @@ cloud.google.com/go/compute/internal
# cloud.google.com/go/compute/metadata v0.2.3
## explicit; go 1.19
cloud.google.com/go/compute/metadata
-# cloud.google.com/go/storage v1.27.0
-## explicit; go 1.17
+# cloud.google.com/go/storage v1.30.1
+## explicit; go 1.19
# github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
## explicit
github.com/Azure/azure-sdk-for-go/storage
@@ -54,13 +54,14 @@ github.com/antlr/antlr4/runtime/Go/antlr/v4
# github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2
## explicit
github.com/armon/circbuf
-# github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a
-## explicit
+# github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535
+## explicit; go 1.12
github.com/asaskevich/govalidator
-# github.com/aws/aws-sdk-go v1.44.100
+# github.com/aws/aws-sdk-go v1.45.7
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/arn
+github.com/aws/aws-sdk-go/aws/auth/bearer
github.com/aws/aws-sdk-go/aws/awserr
github.com/aws/aws-sdk-go/aws/awsutil
github.com/aws/aws-sdk-go/aws/client
@@ -108,6 +109,7 @@ github.com/aws/aws-sdk-go/service/s3/s3iface
github.com/aws/aws-sdk-go/service/s3/s3manager
github.com/aws/aws-sdk-go/service/sso
github.com/aws/aws-sdk-go/service/sso/ssoiface
+github.com/aws/aws-sdk-go/service/ssooidc
github.com/aws/aws-sdk-go/service/sts
github.com/aws/aws-sdk-go/service/sts/stsiface
# github.com/beorn7/perks v1.0.1
@@ -155,7 +157,7 @@ github.com/coreos/go-systemd/v22/journal
# github.com/cpuguy83/go-md2man/v2 v2.0.2
## explicit; go 1.11
github.com/cpuguy83/go-md2man/v2/md2man
-# github.com/davecgh/go-spew v1.1.1
+# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
## explicit
github.com/davecgh/go-spew/spew
# github.com/distribution/reference v0.5.0
@@ -170,7 +172,7 @@ github.com/docker/cli/cli/config/types
# github.com/docker/distribution v2.8.2+incompatible
## explicit
github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v24.0.0+incompatible
+# github.com/docker/docker v24.0.5+incompatible
## explicit
github.com/docker/docker/pkg/homedir
# github.com/docker/docker-credential-helpers v0.7.0
@@ -193,7 +195,7 @@ github.com/evanphx/json-patch/v5
# github.com/fatih/structs v1.1.0
## explicit
github.com/fatih/structs
-# github.com/felixge/httpsnoop v1.0.3
+# github.com/felixge/httpsnoop v1.0.4
## explicit; go 1.13
github.com/felixge/httpsnoop
# github.com/fsnotify/fsnotify v1.7.0
@@ -216,17 +218,17 @@ github.com/go-logr/logr/slogr
# github.com/go-logr/stdr v1.2.2
## explicit; go 1.16
github.com/go-logr/stdr
-# github.com/go-openapi/jsonpointer v0.19.6
-## explicit; go 1.13
+# github.com/go-openapi/jsonpointer v0.20.0
+## explicit; go 1.18
github.com/go-openapi/jsonpointer
# github.com/go-openapi/jsonreference v0.20.2
## explicit; go 1.13
github.com/go-openapi/jsonreference
github.com/go-openapi/jsonreference/internal
-# github.com/go-openapi/swag v0.22.3
+# github.com/go-openapi/swag v0.22.4
## explicit; go 1.18
github.com/go-openapi/swag
-# github.com/go-sql-driver/mysql v1.5.0
+# github.com/go-sql-driver/mysql v1.6.0
## explicit; go 1.10
github.com/go-sql-driver/mysql
# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572
@@ -359,18 +361,19 @@ github.com/google/s2a-go/stream
# github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
## explicit; go 1.13
github.com/google/shlex
-# github.com/google/uuid v1.3.0
+# github.com/google/uuid v1.4.0
## explicit
github.com/google/uuid
-# github.com/googleapis/enterprise-certificate-proxy v0.2.3
+# github.com/googleapis/enterprise-certificate-proxy v0.2.5
## explicit; go 1.19
github.com/googleapis/enterprise-certificate-proxy/client
github.com/googleapis/enterprise-certificate-proxy/client/util
-# github.com/googleapis/gax-go/v2 v2.11.0
+# github.com/googleapis/gax-go/v2 v2.12.0
## explicit; go 1.19
github.com/googleapis/gax-go/v2
github.com/googleapis/gax-go/v2/apierror
github.com/googleapis/gax-go/v2/apierror/internal/proto
+github.com/googleapis/gax-go/v2/callctx
github.com/googleapis/gax-go/v2/internal
# github.com/gorilla/websocket v1.5.0
## explicit; go 1.12
@@ -395,7 +398,7 @@ github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.1.0
## explicit; go 1.18
github.com/inconshreveable/mousetrap
-# github.com/jmespath/go-jmespath v0.4.0
+# github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24
## explicit; go 1.14
github.com/jmespath/go-jmespath
# github.com/josharian/intern v1.0.0
@@ -433,6 +436,8 @@ github.com/liggitt/tabwriter
github.com/mailru/easyjson/buffer
github.com/mailru/easyjson/jlexer
github.com/mailru/easyjson/jwriter
+# github.com/mattn/go-isatty v0.0.18
+## explicit; go 1.15
# github.com/matttproud/golang_protobuf_extensions v1.0.4
## explicit; go 1.9
github.com/matttproud/golang_protobuf_extensions/pbutil
@@ -452,7 +457,7 @@ github.com/mitchellh/reflectwalk
## explicit; go 1.13
github.com/moby/spdystream
github.com/moby/spdystream/spdy
-# github.com/moby/term v0.0.0-20221205130635-1aeaba878587
+# github.com/moby/term v0.5.0
## explicit; go 1.18
github.com/moby/term
github.com/moby/term/windows
@@ -533,6 +538,8 @@ github.com/peterbourgon/diskv
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
+# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
+## explicit
# github.com/prometheus/client_golang v1.17.0
## explicit; go 1.19
github.com/prometheus/client_golang/prometheus
@@ -555,6 +562,8 @@ github.com/prometheus/common/model
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
+# github.com/rogpeppe/go-internal v1.11.0
+## explicit; go 1.19
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
github.com/russross/blackfriday/v2
@@ -564,7 +573,7 @@ github.com/sergi/go-diff/diffmatchpatch
# github.com/shopspring/decimal v1.2.0
## explicit; go 1.13
github.com/shopspring/decimal
-# github.com/sirupsen/logrus v1.9.1
+# github.com/sirupsen/logrus v1.9.3
## explicit; go 1.13
github.com/sirupsen/logrus
# github.com/spf13/cast v1.5.0
@@ -604,13 +613,13 @@ go.bytebuilders.dev/license-proxyserver/apis/proxyserver/v1alpha1
go.bytebuilders.dev/license-proxyserver/client/clientset/versioned
go.bytebuilders.dev/license-proxyserver/client/clientset/versioned/scheme
go.bytebuilders.dev/license-proxyserver/client/clientset/versioned/typed/proxyserver/v1alpha1
-# go.bytebuilders.dev/license-verifier v0.13.5-0.20231201082110-9aacdfe19222
+# go.bytebuilders.dev/license-verifier v0.13.4
## explicit; go 1.18
go.bytebuilders.dev/license-verifier
go.bytebuilders.dev/license-verifier/apis/licenses
go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1
go.bytebuilders.dev/license-verifier/info
-# go.bytebuilders.dev/license-verifier/kubernetes v0.13.5-0.20231201082110-9aacdfe19222
+# go.bytebuilders.dev/license-verifier/kubernetes v0.13.4
## explicit; go 1.18
go.bytebuilders.dev/license-verifier/kubernetes
# go.etcd.io/etcd/api/v3 v3.5.10
@@ -654,15 +663,15 @@ go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/propagation
go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0
-## explicit; go 1.19
+# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0
+## explicit; go 1.20
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0
-## explicit; go 1.19
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0
+## explicit; go 1.20
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.19.0
+# go.opentelemetry.io/otel v1.20.0
## explicit; go 1.20
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@@ -677,22 +686,22 @@ go.opentelemetry.io/otel/semconv/internal
go.opentelemetry.io/otel/semconv/v1.12.0
go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.21.0
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0
## explicit; go 1.20
go.opentelemetry.io/otel/exporters/otlp/otlptrace
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0
## explicit; go 1.20
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/metric v1.19.0
+# go.opentelemetry.io/otel/metric v1.20.0
## explicit; go 1.20
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
-# go.opentelemetry.io/otel/sdk v1.19.0
+# go.opentelemetry.io/otel/sdk v1.20.0
## explicit; go 1.20
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/sdk/instrumentation
@@ -700,9 +709,11 @@ go.opentelemetry.io/otel/sdk/internal
go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/trace v1.19.0
+# go.opentelemetry.io/otel/trace v1.20.0
## explicit; go 1.20
go.opentelemetry.io/otel/trace
+go.opentelemetry.io/otel/trace/embedded
+go.opentelemetry.io/otel/trace/noop
# go.opentelemetry.io/proto/otlp v1.0.0
## explicit; go 1.17
go.opentelemetry.io/proto/otlp/collector/trace/v1
@@ -732,7 +743,7 @@ go.uber.org/zap/internal/pool
go.uber.org/zap/internal/stacktrace
go.uber.org/zap/zapcore
go.uber.org/zap/zapgrpc
-# gocloud.dev v0.22.0
+# gocloud.dev v0.26.0
## explicit; go 1.12
gocloud.dev/blob
gocloud.dev/blob/driver
@@ -766,13 +777,13 @@ golang.org/x/crypto/pkcs12
golang.org/x/crypto/pkcs12/internal/rc2
golang.org/x/crypto/salsa20/salsa
golang.org/x/crypto/scrypt
-# golang.org/x/exp v0.0.0-20220827204233-334a2380cb91
-## explicit; go 1.18
+# golang.org/x/exp v0.0.0-20230905200255-921286631fa9
+## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/maps
golang.org/x/exp/slices
-# golang.org/x/net v0.17.0
-## explicit; go 1.17
+# golang.org/x/net v0.19.0
+## explicit; go 1.18
golang.org/x/net/context
golang.org/x/net/html
golang.org/x/net/html/atom
@@ -786,7 +797,7 @@ golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.13.0
+# golang.org/x/oauth2 v0.15.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/authhandler
@@ -844,8 +855,8 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
-# golang.org/x/time v0.3.0
-## explicit
+# golang.org/x/time v0.5.0
+## explicit; go 1.18
golang.org/x/time/rate
# golang.org/x/tools v0.14.0
## explicit; go 1.18
@@ -855,11 +866,11 @@ golang.org/x/tools/internal/typeparams
## explicit; go 1.17
golang.org/x/xerrors
golang.org/x/xerrors/internal
-# gomodules.xyz/blobfs v0.1.10
-## explicit; go 1.14
+# gomodules.xyz/blobfs v0.1.14
+## explicit; go 1.18
gomodules.xyz/blobfs
-# gomodules.xyz/cert v1.5.0
-## explicit; go 1.17
+# gomodules.xyz/cert v1.5.2
+## explicit; go 1.20
gomodules.xyz/cert
gomodules.xyz/cert/certstore
# gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f
@@ -870,8 +881,8 @@ gomodules.xyz/clock
gomodules.xyz/encoding/json
gomodules.xyz/encoding/json/types
gomodules.xyz/encoding/yaml
-# gomodules.xyz/envsubst v0.1.0
-## explicit; go 1.12
+# gomodules.xyz/envsubst v0.2.0
+## explicit; go 1.19
gomodules.xyz/envsubst
gomodules.xyz/envsubst/parse
gomodules.xyz/envsubst/path
@@ -927,7 +938,7 @@ gomodules.xyz/x/crypto/rand
gomodules.xyz/x/net
gomodules.xyz/x/strings
gomodules.xyz/x/version
-# google.golang.org/api v0.126.0
+# google.golang.org/api v0.140.0
## explicit; go 1.19
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
@@ -941,7 +952,7 @@ google.golang.org/api/option/internaloption
google.golang.org/api/storage/v1
google.golang.org/api/transport/http
google.golang.org/api/transport/http/internal/propagation
-# google.golang.org/appengine v1.6.7
+# google.golang.org/appengine v1.6.8
## explicit; go 1.11
google.golang.org/appengine
google.golang.org/appengine/internal
@@ -953,21 +964,21 @@ google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5
+# google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d
## explicit; go 1.19
google.golang.org/genproto/internal
-# google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e
+# google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d
## explicit; go 1.19
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/api/expr/v1alpha1
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.58.3
+# google.golang.org/grpc v1.59.0
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -1072,6 +1083,8 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
+# gotest.tools/v3 v3.5.0
+## explicit; go 1.17
# k8s.io/api v0.29.0
## explicit; go 1.21
k8s.io/api/admission/v1
@@ -1726,7 +1739,7 @@ k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset
k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme
k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1
k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1
-# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00
+# k8s.io/kube-openapi v0.0.0-20231129212854-f0671cc7e66a
## explicit; go 1.19
k8s.io/kube-openapi/pkg/builder
k8s.io/kube-openapi/pkg/builder3
@@ -1747,7 +1760,7 @@ k8s.io/kube-openapi/pkg/validation/errors
k8s.io/kube-openapi/pkg/validation/spec
k8s.io/kube-openapi/pkg/validation/strfmt
k8s.io/kube-openapi/pkg/validation/strfmt/bson
-# k8s.io/kubernetes v0.0.0-00010101000000-000000000000 => github.com/kmodules/kubernetes v1.30.0-alpha.0.0.20231224075822-3bd9a13c86db
+# k8s.io/kubernetes v1.13.0 => github.com/kmodules/kubernetes v1.30.0-alpha.0.0.20231224075822-3bd9a13c86db
## explicit; go 1.21
k8s.io/kubernetes/pkg/api/legacyscheme
k8s.io/kubernetes/pkg/api/v1/service
@@ -1780,7 +1793,7 @@ k8s.io/kubernetes/pkg/apis/storage/v1alpha1
k8s.io/kubernetes/pkg/apis/storage/v1beta1
k8s.io/kubernetes/pkg/features
k8s.io/kubernetes/pkg/util/parsers
-# k8s.io/utils v0.0.0-20230726121419-3b25d923346b
+# k8s.io/utils v0.0.0-20231127182322-b307cd553661
## explicit; go 1.18
k8s.io/utils/buffer
k8s.io/utils/clock
@@ -1799,7 +1812,7 @@ k8s.io/utils/trace
# kmodules.xyz/apiversion v0.2.0
## explicit; go 1.14
kmodules.xyz/apiversion
-# kmodules.xyz/client-go v0.29.3
+# kmodules.xyz/client-go v0.29.4
## explicit; go 1.21.5
kmodules.xyz/client-go
kmodules.xyz/client-go/admissionregistration
@@ -1829,8 +1842,8 @@ kmodules.xyz/constants/aws
kmodules.xyz/constants/azure
kmodules.xyz/constants/google
kmodules.xyz/constants/openstack
-# kmodules.xyz/csi-utils v0.25.5-0.20231224143309-8dc6b81da55a
-## explicit; go 1.21.5
+# kmodules.xyz/csi-utils v0.25.4
+## explicit; go 1.18
kmodules.xyz/csi-utils/volumesnapshot
kmodules.xyz/csi-utils/volumesnapshot/v1
kmodules.xyz/csi-utils/volumesnapshot/v1beta1
@@ -1890,7 +1903,7 @@ kmodules.xyz/prober/probe
kmodules.xyz/prober/probe/exec
kmodules.xyz/prober/probe/http
kmodules.xyz/prober/probe/tcp
-# kmodules.xyz/resource-metadata v0.18.1-0.20231225184845-c3650ae60818
+# kmodules.xyz/resource-metadata v0.18.1
## explicit; go 1.21.5
kmodules.xyz/resource-metadata/apis/core/v1alpha1
kmodules.xyz/resource-metadata/apis/shared
@@ -2049,7 +2062,7 @@ sigs.k8s.io/structured-merge-diff/v4/value
## explicit; go 1.12
sigs.k8s.io/yaml
sigs.k8s.io/yaml/goyaml.v2
-# stash.appscode.dev/apimachinery v0.32.1-0.20231225221320-6f5a8df2ab6e
+# stash.appscode.dev/apimachinery v0.32.1-0.20240101013736-ef308633d8b2
## explicit; go 1.21.5
stash.appscode.dev/apimachinery/apis
stash.appscode.dev/apimachinery/apis/repositories