-const SchemaURL = "https://opentelemetry.io/schemas/1.24.0"
diff --git a/tools/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
deleted file mode 100644
index c1718234e..000000000
--- a/tools/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
+++ /dev/null
@@ -1,1323 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](/docs/resource/README.md#service) of the remote
- // service. SHOULD be equal to the actual `service.name` resource attribute
- // of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](/docs/resource/README.md#service) of the remote service.
-// SHOULD be equal to the actual `service.name` resource attribute of the
-// remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
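// A minimal usage sketch, not part of the generated file: helpers in this
// package return attribute.KeyValue pairs that instrumentation attaches to a
// span. The tracer name, span name, and remote service name are assumptions.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func callAuthService(ctx context.Context) {
	// Start a client span and record which remote service it talks to.
	_, span := otel.Tracer("example/client").Start(ctx, "GetToken")
	defer span.End()
	span.SetAttributes(semconv.PeerService("AuthTokenCache"))
}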
-
-// These attributes may be used for any operation with an authenticated and/or
-// authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
-	// semantic conventions. It represents the actual/assumed role the client
-	// is making the request under, extracted from the token or application
-	// security context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
-	// semantic conventions. It represents the scopes or granted authorities
-	// the client currently possesses, extracted from the token or application
-	// security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses extracted from token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// These attributes allow reporting this unit of code, providing more
-// context about the span.
-const (
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeStacktraceKey is the attribute Key conforming to the
- // "code.stacktrace" semantic conventions. It represents a stacktrace as a
- // string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
-	// Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n
-	// at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n
-	// at com.example.GenerateTrace.main(GenerateTrace.java:5)'
- CodeStacktraceKey = attribute.Key("code.stacktrace")
-)
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeStacktrace returns an attribute KeyValue conforming to the
-// "code.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func CodeStacktrace(val string) attribute.KeyValue {
- return CodeStacktraceKey.String(val)
-}
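// A sketch, assuming the caller already holds a span: the code.* attributes
// can be derived from Go's runtime package. runtime.FuncForPC yields the
// fully qualified name; splitting it into code.namespace and code.function
// is omitted here, and the full name is recorded as code.function.
package example

import (
	"runtime"

	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func annotateCodeLocation(span trace.Span) {
	pc, file, line, ok := runtime.Caller(1) // location of this helper's caller
	if !ok {
		return
	}
	span.SetAttributes(
		semconv.CodeFilepath(file),
		semconv.CodeLineNumber(line),
	)
	if fn := runtime.FuncForPC(pc); fn != nil {
		span.SetAttributes(semconv.CodeFunction(fn.Name()))
	}
}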
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// Span attributes used by AWS Lambda (in addition to general `faas`
-// attributes).
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
-	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-	// `/runtime/invocation/next` response, where applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` response, where applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for CloudEvents. CloudEvents is a specification on how to define
-// event data in a standard way. These attributes can be attached to spans when
-// performing operations with CloudEvents, regardless of the protocol being
-// used.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
-	// "cloudevents.event_id" semantic conventions. It represents the
-	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
-	// which uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
-	// "cloudevents.event_source" semantic conventions. It represents the
-	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
-	// which identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
-	// "cloudevents.event_type" semantic conventions. It represents the
-	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
-	// which contains a value describing the type of event related to the
-	// originating occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
-// which identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
-// which contains a value describing the type of event related to the
-// originating occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
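// A sketch for a CloudEvents consumer, with hypothetical event fields passed
// in by the caller: event_id and event_source are Required attributes, while
// the event type is Optional.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func annotateCloudEvent(span trace.Span, id, source, eventType string) {
	span.SetAttributes(
		semconv.CloudeventsEventID(id),         // Required
		semconv.CloudeventsEventSource(source), // Required
		semconv.CloudeventsEventType(eventType),
	)
}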
-
-// Semantic conventions for the OpenTracing Shim
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span doesn't depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
-// concepts.
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
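// A sketch of the usual flow: instrumentation records status through the
// span API rather than setting otel.status_code directly, and non-OTLP
// exporters map that status onto the attributes above. The description
// string is an assumption.
package example

import (
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func markFailed(span trace.Span) {
	span.SetStatus(codes.Error, "resource not found")
}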
-
-// This semantic convention describes, with spans, an instance of a function
-// that runs without provisioning or managing servers (also known as a
-// serverless function or Function as a Service (FaaS)).
-const (
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-)
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// Semantic Convention for FaaS triggered as a response to some data source
-// operation such as a database or filesystem read/write.
-const (
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
-	// "faas.document.operation" semantic conventions. It represents the type
-	// of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-)
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// Contains additional attributes for incoming FaaS spans.
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// The `aws` conventions apply to operations using the AWS SDK. They map
-// request or response parameters in AWS SDK API calls to attributes on a Span.
-// The conventions have been collected over time based on feedback from AWS
-// users of tracing and will continue to evolve as new interesting conventions
-// are found.
-// Some descriptions are also provided for populating general OpenTelemetry
-// semantic conventions based on these APIs.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-)
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
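// A sketch with assumed field values: string[]-typed attributes such as
// aws.dynamodb.consumed_capacity take one JSON-serialized string per item of
// the corresponding response field.
package example

import (
	"encoding/json"

	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func recordConsumedCapacity(span trace.Span) {
	item := map[string]any{"TableName": "Users", "CapacityUnits": 1.0}
	b, err := json.Marshal(item)
	if err != nil {
		return
	}
	span.SetAttributes(semconv.AWSDynamoDBConsumedCapacity(string(b)))
}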
-
-// DynamoDB.CreateTable
-const (
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// DynamoDB.ListTables
-const (
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
-	// "aws.dynamodb.table_count" semantic conventions. It represents the
-	// number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the the
-// number of items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// DynamoDB.Query
-const (
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// DynamoDB.Scan
-const (
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-)
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// DynamoDB.UpdateTable
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-	// conventions. It represents the JSON-serialized value of each item in
-	// the `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Attributes that exist for S3 request types.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
-	// Note: The `delete` attribute is only applicable to the
-	// [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
-	// operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
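// A sketch for an upload-part call, using the example values above: per the
// notes, aws.s3.part_number and aws.s3.upload_id apply to multipart-upload
// operations, alongside the bucket and key of the object being assembled.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func annotateUploadPart(span trace.Span, part int, uploadID string) {
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3PartNumber(part),
		semconv.AWSS3UploadID(uploadID),
	)
}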
-
-// Semantic conventions to apply when instrumenting the GraphQL implementation.
-// They map GraphQL operations to attributes on a Span.
-const (
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
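// A sketch using the example values above: enum-typed attributes such as
// graphql.operation.type are exposed as ready-made KeyValue variables rather
// than constructor functions.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func annotateGraphQL(span trace.Span) {
	span.SetAttributes(
		semconv.GraphqlOperationTypeQuery,
		semconv.GraphqlOperationName("findBookByID"),
		semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
	)
}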
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/context.go b/tools/vendor/go.opentelemetry.io/otel/trace/context.go
index 5650a174b..8c45a7107 100644
--- a/tools/vendor/go.opentelemetry.io/otel/trace/context.go
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont
return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
}
-// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
// as a remote SpanContext and as the current Span. The Span implementation
// that wraps rsc is non-recording and performs no operations other than to
// return rsc as the SpanContext from the SpanContext method.
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/doc.go b/tools/vendor/go.opentelemetry.io/otel/trace/doc.go
index d661c5d10..cdbf41d6d 100644
--- a/tools/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -96,7 +96,7 @@ can embed the API interface directly.
This option is not recommended. It will lead to publishing packages that
contain runtime panics when users update to newer versions of
-[go.opentelemetry.io/otel/trace], which may be done with a trasitive
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
dependency.
Finally, an author can embed another implementation in theirs. The embedded
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/provider.go b/tools/vendor/go.opentelemetry.io/otel/trace/provider.go
new file mode 100644
index 000000000..ef85cb70c
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/provider.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides; it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type TracerProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.TracerProvider
+
+ // Tracer returns a unique Tracer scoped to be used by instrumentation code
+ // to trace computational workflows. The scope and identity of that
+ // instrumentation code is uniquely defined by the name and options passed.
+ //
+ // The passed name needs to uniquely identify instrumentation code.
+ // Therefore, it is recommended that name is the Go package name of the
+ // library providing instrumentation (note: not the code being
+ // instrumented). Instrumentation libraries can have multiple versions,
+ // therefore, the WithInstrumentationVersion option should be used to
+ // distinguish these different codebases. Additionally, instrumentation
+ // libraries may sometimes use traces to communicate different domains of
+ // workflow data (i.e. using spans to communicate workflow events only). If
+ // this is the case, the WithScopeAttributes option should be used to
+ // uniquely identify Tracers that handle the different domains of workflow
+ // data.
+ //
+ // If the same name and options are passed multiple times, the same Tracer
+ // will be returned (it is up to the implementation if this will be the
+ // same underlying instance of that Tracer or not). It is not necessary to
+ // call this multiple times with the same name and options to get an
+ // up-to-date Tracer. All implementations will ensure any TracerProvider
+ // configuration changes are propagated to all provided Tracers.
+ //
+ // If name is empty, then an implementation defined default name will be
+ // used instead.
+ //
+ // This method is safe to call concurrently.
+ Tracer(name string, options ...TracerOption) Tracer
+}
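
A minimal sketch (not part of the diff) of how instrumentation code consumes this interface via the globally registered provider mentioned above; the package path "example.com/demo" is a hypothetical instrumentation library name:

package demo

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// tracer follows the naming recommendation above: the name identifies the
// instrumentation library, and the version option distinguishes its releases.
var tracer trace.Tracer = otel.GetTracerProvider().Tracer(
	"example.com/demo",
	trace.WithInstrumentationVersion("0.1.0"),
)
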
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/span.go b/tools/vendor/go.opentelemetry.io/otel/trace/span.go
new file mode 100644
index 000000000..d3aa476ee
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Span
+
+ // End completes the Span. The Span is considered complete and ready to be
+ // delivered through the rest of the telemetry pipeline after this method
+ // is called. Therefore, updates to the Span are not allowed after this
+ // method has been called.
+ End(options ...SpanEndOption)
+
+ // AddEvent adds an event with the provided name and options.
+ AddEvent(name string, options ...EventOption)
+
+ // AddLink adds a link.
+	// For contexts that are available at span creation, adding links with
+	// WithLinks is preferred to calling AddLink later, because head sampling
+	// decisions can only consider information present at span creation.
+ AddLink(link Link)
+
+ // IsRecording returns the recording state of the Span. It will return
+ // true if the Span is active and events can be recorded.
+ IsRecording() bool
+
+ // RecordError will record err as an exception span event for this span. An
+ // additional call to SetStatus is required if the Status of the Span should
+ // be set to Error, as this method does not change the Span status. If this
+ // span is not being recorded or err is nil then this method does nothing.
+ RecordError(err error, options ...EventOption)
+
+ // SpanContext returns the SpanContext of the Span. The returned SpanContext
+ // is usable even after the End method has been called for the Span.
+ SpanContext() SpanContext
+
+ // SetStatus sets the status of the Span in the form of a code and a
+ // description, provided the status hasn't already been set to a higher
+ // value before (OK > Error > Unset). The description is only included in a
+ // status when the code is for an error.
+ SetStatus(code codes.Code, description string)
+
+ // SetName sets the Span name.
+ SetName(name string)
+
+ // SetAttributes sets kv as attributes of the Span. If a key from kv
+ // already exists for an attribute of the Span it will be overwritten with
+ // the value contained in kv.
+ SetAttributes(kv ...attribute.KeyValue)
+
+ // TracerProvider returns a TracerProvider that can be used to generate
+ // additional Spans on the same telemetry pipeline as the current Span.
+ TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+// 1. Batch Processing: A batch of operations may contain operations
+// associated with one or more traces/spans. Since there can only be one
+// parent SpanContext, a Link is used to keep reference to the
+// SpanContext of all operations in the batch.
+// 2. Public Endpoint: A SpanContext for an incoming client request on a
+// public endpoint should be considered untrusted. In such a case, a new
+// trace with its own identity and sampling decision needs to be created,
+// but this new trace needs to be related to the original trace in some
+// form. A Link is used to keep reference to the original SpanContext and
+// track the relationship.
+type Link struct {
+ // SpanContext of the linked Span.
+ SpanContext SpanContext
+
+ // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+ return Link{
+ SpanContext: SpanContextFromContext(ctx),
+ Attributes: attrs,
+ }
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+ // SpanKindUnspecified is an unspecified SpanKind and is not a valid
+ // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+ // if it is received.
+ SpanKindUnspecified SpanKind = 0
+ // SpanKindInternal is a SpanKind for a Span that represents an internal
+ // operation within an application.
+ SpanKindInternal SpanKind = 1
+ // SpanKindServer is a SpanKind for a Span that represents the operation
+ // of handling a request from a client.
+ SpanKindServer SpanKind = 2
+ // SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+ SpanKindClient SpanKind = 3
+ // SpanKindProducer is a SpanKind for a Span that represents the operation
+ // of a producer sending a message to a message broker. Unlike
+ // SpanKindClient and SpanKindServer, there is often no direct
+ // relationship between this kind of Span and a SpanKindConsumer kind. A
+ // SpanKindProducer Span will end once the message is accepted by the
+ // message broker which might not overlap with the processing of that
+ // message.
+ SpanKindProducer SpanKind = 4
+ // SpanKindConsumer is a SpanKind for a Span that represents the operation
+ // of a consumer receiving a message from a message broker. Like
+ // SpanKindProducer Spans, there is often no direct relationship between
+ // this Span and the Span that produced the message.
+ SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+ switch spanKind {
+ case SpanKindInternal,
+ SpanKindServer,
+ SpanKindClient,
+ SpanKindProducer,
+ SpanKindConsumer:
+ // valid
+ return spanKind
+ default:
+ return SpanKindInternal
+ }
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+ switch sk {
+ case SpanKindInternal:
+ return "internal"
+ case SpanKindServer:
+ return "server"
+ case SpanKindClient:
+ return "client"
+ case SpanKindProducer:
+ return "producer"
+ case SpanKindConsumer:
+ return "consumer"
+ default:
+ return "unspecified"
+ }
+}
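
A short sketch (not from the diff) of the RecordError/SetStatus pairing the interface documents: RecordError only adds an exception event, so the Error status must be set separately.

package demo

import (
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// finishSpan ends the span, recording err as an exception event and marking
// the span status Error when err is non-nil.
func finishSpan(span trace.Span, err error) {
	defer span.End()
	if err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
	}
}
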
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/trace.go b/tools/vendor/go.opentelemetry.io/otel/trace/trace.go
index 28877d4ab..d49adf671 100644
--- a/tools/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace"
import (
"bytes"
- "context"
"encoding/hex"
"encoding/json"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace/embedded"
)
const (
@@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
Remote: sc.remote,
})
}
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced. A Tracer is used to
-// create a Span and it is then up to the operation the Span represents to
-// properly end the Span when the operation itself ends.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Span interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Span
-
- // End completes the Span. The Span is considered complete and ready to be
- // delivered through the rest of the telemetry pipeline after this method
- // is called. Therefore, updates to the Span are not allowed after this
- // method has been called.
- End(options ...SpanEndOption)
-
- // AddEvent adds an event with the provided name and options.
- AddEvent(name string, options ...EventOption)
-
- // AddLink adds a link.
- // Adding links at span creation using WithLinks is preferred to calling AddLink
- // later, for contexts that are available during span creation, because head
- // sampling decisions can only consider information present during span creation.
- AddLink(link Link)
-
- // IsRecording returns the recording state of the Span. It will return
- // true if the Span is active and events can be recorded.
- IsRecording() bool
-
- // RecordError will record err as an exception span event for this span. An
- // additional call to SetStatus is required if the Status of the Span should
- // be set to Error, as this method does not change the Span status. If this
- // span is not being recorded or err is nil then this method does nothing.
- RecordError(err error, options ...EventOption)
-
- // SpanContext returns the SpanContext of the Span. The returned SpanContext
- // is usable even after the End method has been called for the Span.
- SpanContext() SpanContext
-
- // SetStatus sets the status of the Span in the form of a code and a
- // description, provided the status hasn't already been set to a higher
- // value before (OK > Error > Unset). The description is only included in a
- // status when the code is for an error.
- SetStatus(code codes.Code, description string)
-
- // SetName sets the Span name.
- SetName(name string)
-
- // SetAttributes sets kv as attributes of the Span. If a key from kv
- // already exists for an attribute of the Span it will be overwritten with
- // the value contained in kv.
- SetAttributes(kv ...attribute.KeyValue)
-
- // TracerProvider returns a TracerProvider that can be used to generate
- // additional Spans on the same telemetry pipeline as the current Span.
- TracerProvider() TracerProvider
-}
-
-// Link is the relationship between two Spans. The relationship can be within
-// the same Trace or across different Traces.
-//
-// For example, a Link is used in the following situations:
-//
-// 1. Batch Processing: A batch of operations may contain operations
-// associated with one or more traces/spans. Since there can only be one
-// parent SpanContext, a Link is used to keep reference to the
-// SpanContext of all operations in the batch.
-// 2. Public Endpoint: A SpanContext for an in incoming client request on a
-// public endpoint should be considered untrusted. In such a case, a new
-// trace with its own identity and sampling decision needs to be created,
-// but this new trace needs to be related to the original trace in some
-// form. A Link is used to keep reference to the original SpanContext and
-// track the relationship.
-type Link struct {
- // SpanContext of the linked Span.
- SpanContext SpanContext
-
- // Attributes describe the aspects of the link.
- Attributes []attribute.KeyValue
-}
-
-// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
-func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
- return Link{
- SpanContext: SpanContextFromContext(ctx),
- Attributes: attrs,
- }
-}
-
-// SpanKind is the role a Span plays in a Trace.
-type SpanKind int
-
-// As a convenience, these match the proto definition, see
-// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
-//
-// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
-// to coerce a span kind to a valid value.
-const (
- // SpanKindUnspecified is an unspecified SpanKind and is not a valid
- // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
- // if it is received.
- SpanKindUnspecified SpanKind = 0
- // SpanKindInternal is a SpanKind for a Span that represents an internal
- // operation within an application.
- SpanKindInternal SpanKind = 1
- // SpanKindServer is a SpanKind for a Span that represents the operation
- // of handling a request from a client.
- SpanKindServer SpanKind = 2
- // SpanKindClient is a SpanKind for a Span that represents the operation
- // of client making a request to a server.
- SpanKindClient SpanKind = 3
- // SpanKindProducer is a SpanKind for a Span that represents the operation
- // of a producer sending a message to a message broker. Unlike
- // SpanKindClient and SpanKindServer, there is often no direct
- // relationship between this kind of Span and a SpanKindConsumer kind. A
- // SpanKindProducer Span will end once the message is accepted by the
- // message broker which might not overlap with the processing of that
- // message.
- SpanKindProducer SpanKind = 4
- // SpanKindConsumer is a SpanKind for a Span that represents the operation
- // of a consumer receiving a message from a message broker. Like
- // SpanKindProducer Spans, there is often no direct relationship between
- // this Span and the Span that produced the message.
- SpanKindConsumer SpanKind = 5
-)
-
-// ValidateSpanKind returns a valid span kind value. This will coerce
-// invalid values into the default value, SpanKindInternal.
-func ValidateSpanKind(spanKind SpanKind) SpanKind {
- switch spanKind {
- case SpanKindInternal,
- SpanKindServer,
- SpanKindClient,
- SpanKindProducer,
- SpanKindConsumer:
- // valid
- return spanKind
- default:
- return SpanKindInternal
- }
-}
-
-// String returns the specified name of the SpanKind in lower-case.
-func (sk SpanKind) String() string {
- switch sk {
- case SpanKindInternal:
- return "internal"
- case SpanKindServer:
- return "server"
- case SpanKindClient:
- return "client"
- case SpanKindProducer:
- return "producer"
- case SpanKindConsumer:
- return "consumer"
- default:
- return "unspecified"
- }
-}
-
-// Tracer is the creator of Spans.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Tracer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Tracer
-
- // Start creates a span and a context.Context containing the newly-created span.
- //
- // If the context.Context provided in `ctx` contains a Span then the newly-created
- // Span will be a child of that span, otherwise it will be a root span. This behavior
- // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
- // newly-created Span to be a root span even if `ctx` contains a Span.
- //
- // When creating a Span it is recommended to provide all known span attributes using
- // the `WithAttributes()` SpanOption as samplers will only have access to the
- // attributes provided when a Span is created.
- //
- // Any Span that is created MUST also be ended. This is the responsibility of the user.
- // Implementations of this API may leak memory or other resources if Spans are not ended.
- Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
-}
-
-// TracerProvider provides Tracers that are used by instrumentation code to
-// trace computational workflows.
-//
-// A TracerProvider is the collection destination of all Spans from Tracers it
-// provides, it represents a unique telemetry collection pipeline. How that
-// pipeline is defined, meaning how those Spans are collected, processed, and
-// where they are exported, depends on its implementation. Instrumentation
-// authors do not need to define this implementation, rather just use the
-// provided Tracers to instrument code.
-//
-// Commonly, instrumentation code will accept a TracerProvider implementation
-// at runtime from its users or it can simply use the globally registered one
-// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type TracerProvider interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.TracerProvider
-
- // Tracer returns a unique Tracer scoped to be used by instrumentation code
- // to trace computational workflows. The scope and identity of that
- // instrumentation code is uniquely defined by the name and options passed.
- //
- // The passed name needs to uniquely identify instrumentation code.
- // Therefore, it is recommended that name is the Go package name of the
- // library providing instrumentation (note: not the code being
- // instrumented). Instrumentation libraries can have multiple versions,
- // therefore, the WithInstrumentationVersion option should be used to
- // distinguish these different codebases. Additionally, instrumentation
- // libraries may sometimes use traces to communicate different domains of
- // workflow data (i.e. using spans to communicate workflow events only). If
- // this is the case, the WithScopeAttributes option should be used to
- // uniquely identify Tracers that handle the different domains of workflow
- // data.
- //
- // If the same name and options are passed multiple times, the same Tracer
- // will be returned (it is up to the implementation if this will be the
- // same underlying instance of that Tracer or not). It is not necessary to
- // call this multiple times with the same name and options to get an
- // up-to-date Tracer. All implementations will ensure any TracerProvider
- // configuration changes are propagated to all provided Tracers.
- //
- // If name is empty, then an implementation defined default name will be
- // used instead.
- //
- // This method is safe to call concurrently.
- Tracer(name string, options ...TracerOption) Tracer
-}
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/tracer.go b/tools/vendor/go.opentelemetry.io/otel/trace/tracer.go
new file mode 100644
index 000000000..77952d2a0
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/tracer.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Tracer
+
+ // Start creates a span and a context.Context containing the newly-created span.
+ //
+ // If the context.Context provided in `ctx` contains a Span then the newly-created
+ // Span will be a child of that span, otherwise it will be a root span. This behavior
+ // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+ // newly-created Span to be a root span even if `ctx` contains a Span.
+ //
+ // When creating a Span it is recommended to provide all known span attributes using
+ // the `WithAttributes()` SpanOption as samplers will only have access to the
+ // attributes provided when a Span is created.
+ //
+ // Any Span that is created MUST also be ended. This is the responsibility of the user.
+ // Implementations of this API may leak memory or other resources if Spans are not ended.
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
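
A usage sketch (not from the diff) honoring the Start contract above: attributes are supplied at creation so samplers can see them, and the returned Span is always ended.

package demo

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// traced wraps fn in a span named name; the returned context parents any
// child spans fn creates.
func traced(ctx context.Context, tracer trace.Tracer, name string, fn func(context.Context)) {
	ctx, span := tracer.Start(ctx, name,
		trace.WithAttributes(attribute.Bool("demo.wrapped", true)))
	defer span.End()
	fn(ctx)
}
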
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/tools/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index 20b5cf243..dc5e34cad 100644
--- a/tools/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string {
return ""
}
+// Walk walks all key-value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+ for _, m := range ts.list {
+ if !f(m.Key, m.Value) {
+ break
+ }
+ }
+}
+
// Insert adds a new list-member defined by the key/value pair to the
// TraceState. If a list-member already exists for the given key, that
// list-member's value is updated. The new or updated list-member is always
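
A small sketch (not from the diff) of the new Walk method, using its early-exit contract to collect at most n member keys in list order:

package demo

import "go.opentelemetry.io/otel/trace"

// firstKeys returns up to n member keys from ts; returning false from the
// callback stops the iteration early.
func firstKeys(ts trace.TraceState, n int) []string {
	if n <= 0 {
		return nil
	}
	keys := make([]string, 0, n)
	ts.Walk(func(key, _ string) bool {
		keys = append(keys, key)
		return len(keys) < n
	})
	return keys
}
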
diff --git a/tools/vendor/go.opentelemetry.io/otel/verify_examples.sh b/tools/vendor/go.opentelemetry.io/otel/verify_examples.sh
deleted file mode 100644
index e57bf57fc..000000000
--- a/tools/vendor/go.opentelemetry.io/otel/verify_examples.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-# SPDX-License-Identifier: Apache-2.0
-
-set -euo pipefail
-
-cd $(dirname $0)
-TOOLS_DIR=$(pwd)/.tools
-
-if [ -z "${GOPATH}" ] ; then
- printf "GOPATH is not defined.\n"
- exit -1
-fi
-
-if [ ! -d "${GOPATH}" ] ; then
- printf "GOPATH ${GOPATH} is invalid \n"
- exit -1
-fi
-
-# Pre-requisites
-if ! git diff --quiet; then \
- git status
- printf "\n\nError: working tree is not clean\n"
- exit -1
-fi
-
-if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
- printf "$(git log -1)"
- printf "\n\nError: HEAD is not pointing to a tagged version"
-fi
-
-make ${TOOLS_DIR}/gojq
-
-DIR_TMP="${GOPATH}/src/oteltmp/"
-rm -rf $DIR_TMP
-mkdir -p $DIR_TMP
-
-printf "Copy examples to ${DIR_TMP}\n"
-cp -a ./example ${DIR_TMP}
-
-# Update go.mod files
-printf "Update go.mod: rename module and remove replace\n"
-
-PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
-
-for dir in $PACKAGE_DIRS; do
- printf " Update go.mod for $dir\n"
- (cd "${DIR_TMP}/${dir}" && \
- # replaces is ("mod1" "mod2" …)
- replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
- # strip double quotes
- replaces=("${replaces[@]%\"}") && \
- replaces=("${replaces[@]#\"}") && \
- # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
- dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
- go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
- go mod tidy)
-done
-printf "Update done:\n\n"
-
-# Build directories that contain main package. These directories are different than
-# directories that contain go.mod files.
-printf "Build examples:\n"
-EXAMPLES=$(./get_main_pkgs.sh ./example)
-for ex in $EXAMPLES; do
- printf " Build $ex in ${DIR_TMP}/${ex}\n"
- (cd "${DIR_TMP}/${ex}" && \
- go build .)
-done
-
-# Cleanup
-printf "Remove copied files.\n"
-rm -rf $DIR_TMP
diff --git a/tools/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/tools/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 000000000..c9b7cdbbf
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Check out the previous version of the changelog from the base branch into the temp folder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+ echo "Error: The released sections of the changelog file have been modified."
+ diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+ rm -rf "$TEMP_DIR"
+ false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/tools/vendor/go.opentelemetry.io/otel/version.go b/tools/vendor/go.opentelemetry.io/otel/version.go
index ab2896052..59e248161 100644
--- a/tools/vendor/go.opentelemetry.io/otel/version.go
+++ b/tools/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.28.0"
+ return "1.32.0"
}
diff --git a/tools/vendor/go.opentelemetry.io/otel/versions.yaml b/tools/vendor/go.opentelemetry.io/otel/versions.yaml
index 241cfc82a..c04b12f6b 100644
--- a/tools/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/tools/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,19 +3,13 @@
module-sets:
stable-v1:
- version: v1.28.0
+ version: v1.32.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/bridge/opentracing
- go.opentelemetry.io/otel/bridge/opentracing/test
- - go.opentelemetry.io/otel/example/dice
- - go.opentelemetry.io/otel/example/namedtracer
- - go.opentelemetry.io/otel/example/opencensus
- - go.opentelemetry.io/otel/example/otel-collector
- - go.opentelemetry.io/otel/example/passthrough
- - go.opentelemetry.io/otel/example/zipkin
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/otlp/otlptrace
@@ -29,21 +23,20 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.50.0
+ version: v0.54.0
modules:
- - go.opentelemetry.io/otel/example/prometheus
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.4.0
+ version: v0.8.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/sdk/log
+ - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
- version: v0.0.8
+ version: v0.0.11
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
- - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
diff --git a/tools/vendor/golang.org/x/exp/constraints/constraints.go b/tools/vendor/golang.org/x/exp/constraints/constraints.go
deleted file mode 100644
index 2c033dff4..000000000
--- a/tools/vendor/golang.org/x/exp/constraints/constraints.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package constraints defines a set of useful constraints to be used
-// with type parameters.
-package constraints
-
-// Signed is a constraint that permits any signed integer type.
-// If future releases of Go add new predeclared signed integer types,
-// this constraint will be modified to include them.
-type Signed interface {
- ~int | ~int8 | ~int16 | ~int32 | ~int64
-}
-
-// Unsigned is a constraint that permits any unsigned integer type.
-// If future releases of Go add new predeclared unsigned integer types,
-// this constraint will be modified to include them.
-type Unsigned interface {
- ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
-}
-
-// Integer is a constraint that permits any integer type.
-// If future releases of Go add new predeclared integer types,
-// this constraint will be modified to include them.
-type Integer interface {
- Signed | Unsigned
-}
-
-// Float is a constraint that permits any floating-point type.
-// If future releases of Go add new predeclared floating-point types,
-// this constraint will be modified to include them.
-type Float interface {
- ~float32 | ~float64
-}
-
-// Complex is a constraint that permits any complex numeric type.
-// If future releases of Go add new predeclared complex numeric types,
-// this constraint will be modified to include them.
-type Complex interface {
- ~complex64 | ~complex128
-}
-
-// Ordered is a constraint that permits any ordered type: any type
-// that supports the operators < <= >= >.
-// If future releases of Go add new ordered types,
-// this constraint will be modified to include them.
-type Ordered interface {
- Integer | Float | ~string
-}
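
With this vendored copy of constraints removed, the later hunks in this diff swap constraints.Ordered for the standard library's cmp.Ordered (Go 1.21+). A tiny sketch of the replacement, with a hypothetical maxOf helper:

package demo

import "cmp"

// maxOf returns the larger of a and b for any ordered type, using the std
// cmp.Ordered constraint that supersedes constraints.Ordered here.
func maxOf[T cmp.Ordered](a, b T) T {
	if a > b {
		return a
	}
	return b
}
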
diff --git a/tools/vendor/golang.org/x/exp/maps/maps.go b/tools/vendor/golang.org/x/exp/maps/maps.go
index ecc0dabb7..c25939b92 100644
--- a/tools/vendor/golang.org/x/exp/maps/maps.go
+++ b/tools/vendor/golang.org/x/exp/maps/maps.go
@@ -5,9 +5,20 @@
// Package maps defines various functions useful with maps of any type.
package maps
+import "maps"
+
+// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of
+// these functions except Keys and Values should be annotated
+// (provisionally with "//go:fix inline") so that tools can safely and
+// automatically replace calls to exp/maps with calls to std maps by
+// inlining them.
+
// Keys returns the keys of the map m.
// The keys will be in an indeterminate order.
func Keys[M ~map[K]V, K comparable, V any](m M) []K {
+ // The simplest true equivalent using std is:
+ // return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)).
+
r := make([]K, 0, len(m))
for k := range m {
r = append(r, k)
@@ -18,6 +29,9 @@ func Keys[M ~map[K]V, K comparable, V any](m M) []K {
// Values returns the values of the map m.
// The values will be in an indeterminate order.
func Values[M ~map[K]V, K comparable, V any](m M) []V {
+ // The simplest true equivalent using std is:
+ // return slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m)).
+
r := make([]V, 0, len(m))
for _, v := range m {
r = append(r, v)
@@ -28,50 +42,24 @@ func Values[M ~map[K]V, K comparable, V any](m M) []V {
// Equal reports whether two maps contain the same key/value pairs.
// Values are compared using ==.
func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
- if len(m1) != len(m2) {
- return false
- }
- for k, v1 := range m1 {
- if v2, ok := m2[k]; !ok || v1 != v2 {
- return false
- }
- }
- return true
+ return maps.Equal(m1, m2)
}
// EqualFunc is like Equal, but compares values using eq.
// Keys are still compared with ==.
func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
- if len(m1) != len(m2) {
- return false
- }
- for k, v1 := range m1 {
- if v2, ok := m2[k]; !ok || !eq(v1, v2) {
- return false
- }
- }
- return true
+ return maps.EqualFunc(m1, m2, eq)
}
// Clear removes all entries from m, leaving it empty.
func Clear[M ~map[K]V, K comparable, V any](m M) {
- for k := range m {
- delete(m, k)
- }
+ clear(m)
}
// Clone returns a copy of m. This is a shallow clone:
// the new keys and values are set using ordinary assignment.
func Clone[M ~map[K]V, K comparable, V any](m M) M {
- // Preserve nil in case it matters.
- if m == nil {
- return nil
- }
- r := make(M, len(m))
- for k, v := range m {
- r[k] = v
- }
- return r
+ return maps.Clone(m)
}
// Copy copies all key/value pairs in src adding them to dst.
@@ -79,16 +67,10 @@ func Clone[M ~map[K]V, K comparable, V any](m M) M {
// the value in dst will be overwritten by the value associated
// with the key in src.
func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
- for k, v := range src {
- dst[k] = v
- }
+ maps.Copy(dst, src)
}
// DeleteFunc deletes any key/value pairs from m for which del returns true.
func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
- for k, v := range m {
- if del(k, v) {
- delete(m, k)
- }
- }
+ maps.DeleteFunc(m, del)
}
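
A quick sketch (not from the diff) of the std equivalence the new comments describe, assuming Go 1.23 for the iterator-based maps.Keys and slices.AppendSeq:

package demo

import (
	"maps"
	"slices"
)

// keysViaStd mirrors the comment inside Keys above: collect the std iterator
// into a pre-sized slice; key order remains indeterminate.
func keysViaStd[M ~map[K]V, K comparable, V any](m M) []K {
	return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m))
}
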
diff --git a/tools/vendor/golang.org/x/exp/slices/cmp.go b/tools/vendor/golang.org/x/exp/slices/cmp.go
deleted file mode 100644
index fbf1934a0..000000000
--- a/tools/vendor/golang.org/x/exp/slices/cmp.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// min is a version of the predeclared function from the Go 1.21 release.
-func min[T constraints.Ordered](a, b T) T {
- if a < b || isNaN(a) {
- return a
- }
- return b
-}
-
-// max is a version of the predeclared function from the Go 1.21 release.
-func max[T constraints.Ordered](a, b T) T {
- if a > b || isNaN(a) {
- return a
- }
- return b
-}
-
-// cmpLess is a copy of cmp.Less from the Go 1.21 release.
-func cmpLess[T constraints.Ordered](x, y T) bool {
- return (isNaN(x) && !isNaN(y)) || x < y
-}
-
-// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
-func cmpCompare[T constraints.Ordered](x, y T) int {
- xNaN := isNaN(x)
- yNaN := isNaN(y)
- if xNaN && yNaN {
- return 0
- }
- if xNaN || x < y {
- return -1
- }
- if yNaN || x > y {
- return +1
- }
- return 0
-}
diff --git a/tools/vendor/golang.org/x/exp/slices/slices.go b/tools/vendor/golang.org/x/exp/slices/slices.go
index 46ceac343..757383ea1 100644
--- a/tools/vendor/golang.org/x/exp/slices/slices.go
+++ b/tools/vendor/golang.org/x/exp/slices/slices.go
@@ -6,26 +6,22 @@
package slices
import (
- "unsafe"
-
- "golang.org/x/exp/constraints"
+ "cmp"
+ "slices"
)
+// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of
+// these functions should be annotated (provisionally with "//go:fix
+// inline") so that tools can safely and automatically replace calls
+// to exp/slices with calls to std slices by inlining them.
+
// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i := range s1 {
- if s1[i] != s2[i] {
- return false
- }
- }
- return true
+ return slices.Equal(s1, s2)
}
// EqualFunc reports whether two slices are equal using an equality
@@ -34,16 +30,7 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool {
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i, v1 := range s1 {
- v2 := s2[i]
- if !eq(v1, v2) {
- return false
- }
- }
- return true
+ return slices.EqualFunc(s1, s2, eq)
}
// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
@@ -53,20 +40,8 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
-func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
- for i, v1 := range s1 {
- if i >= len(s2) {
- return +1
- }
- v2 := s2[i]
- if c := cmpCompare(v1, v2); c != 0 {
- return c
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- return 0
+func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int {
+ return slices.Compare(s1, s2)
}
// CompareFunc is like [Compare] but uses a custom comparison function on each
@@ -75,52 +50,30 @@ func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
- for i, v1 := range s1 {
- if i >= len(s2) {
- return +1
- }
- v2 := s2[i]
- if c := cmp(v1, v2); c != 0 {
- return c
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- return 0
+ return slices.CompareFunc(s1, s2, cmp)
}
// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
func Index[S ~[]E, E comparable](s S, v E) int {
- for i := range s {
- if v == s[i] {
- return i
- }
- }
- return -1
+ return slices.Index(s, v)
}
// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
- for i := range s {
- if f(s[i]) {
- return i
- }
- }
- return -1
+ return slices.IndexFunc(s, f)
}
// Contains reports whether v is present in s.
func Contains[S ~[]E, E comparable](s S, v E) bool {
- return Index(s, v) >= 0
+ return slices.Contains(s, v)
}
// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
- return IndexFunc(s, f) >= 0
+ return slices.ContainsFunc(s, f)
}
// Insert inserts the values v... into s at index i,
@@ -131,92 +84,7 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
- m := len(v)
- if m == 0 {
- return s
- }
- n := len(s)
- if i == n {
- return append(s, v...)
- }
- if n+m > cap(s) {
- // Use append rather than make so that we bump the size of
- // the slice up to the next storage class.
- // This is what Grow does but we don't call Grow because
- // that might copy the values twice.
- s2 := append(s[:i], make(S, n+m-i)...)
- copy(s2[i:], v)
- copy(s2[i+m:], s[i:])
- return s2
- }
- s = s[:n+m]
-
- // before:
- // s: aaaaaaaabbbbccccccccdddd
- // ^ ^ ^ ^
- // i i+m n n+m
- // after:
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- //
- // a are the values that don't move in s.
- // v are the values copied in from v.
- // b and c are the values from s that are shifted up in index.
- // d are the values that get overwritten, never to be seen again.
-
- if !overlaps(v, s[i+m:]) {
- // Easy case - v does not overlap either the c or d regions.
- // (It might be in some of a or b, or elsewhere entirely.)
- // The data we copy up doesn't write to v at all, so just do it.
-
- copy(s[i+m:], s[i:])
-
- // Now we have
- // s: aaaaaaaabbbbbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // Note the b values are duplicated.
-
- copy(s[i:], v)
-
- // Now we have
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // That's the result we want.
- return s
- }
-
- // The hard case - v overlaps c or d. We can't just shift up
- // the data because we'd move or clobber the values we're trying
- // to insert.
- // So instead, write v on top of d, then rotate.
- copy(s[n:], v)
-
- // Now we have
- // s: aaaaaaaabbbbccccccccvvvv
- // ^ ^ ^ ^
- // i i+m n n+m
-
- rotateRight(s[i:], m)
-
- // Now we have
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // That's the result we want.
- return s
-}
-
-// clearSlice sets all elements up to the length of s to the zero value of E.
-// We may use the builtin clear func instead, and remove clearSlice, when upgrading
-// to Go 1.21+.
-func clearSlice[S ~[]E, E any](s S) {
- var zero E
- for i := range s {
- s[i] = zero
- }
+ return slices.Insert(s, i, v...)
}
// Delete removes the elements s[i:j] from s, returning the modified slice.
@@ -225,135 +93,27 @@ func clearSlice[S ~[]E, E any](s S) {
// make a single call deleting them all together than to delete one at a time.
// Delete zeroes the elements s[len(s)-(j-i):len(s)].
func Delete[S ~[]E, E any](s S, i, j int) S {
- _ = s[i:j:len(s)] // bounds check
-
- if i == j {
- return s
- }
-
- oldlen := len(s)
- s = append(s[:i], s[j:]...)
- clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
- return s
+ return slices.Delete(s, i, j)
}
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// DeleteFunc zeroes the elements between the new length and the original length.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
- i := IndexFunc(s, del)
- if i == -1 {
- return s
- }
- // Don't start copying elements until we find one to delete.
- for j := i + 1; j < len(s); j++ {
- if v := s[j]; !del(v) {
- s[i] = v
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
+ return slices.DeleteFunc(s, del)
}
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
- _ = s[i:j] // verify that i:j is a valid subslice
-
- if i == j {
- return Insert(s, i, v...)
- }
- if j == len(s) {
- return append(s[:i], v...)
- }
-
- tot := len(s[:i]) + len(v) + len(s[j:])
- if tot > cap(s) {
- // Too big to fit, allocate and copy over.
- s2 := append(s[:i], make(S, tot-i)...) // See Insert
- copy(s2[i:], v)
- copy(s2[i+len(v):], s[j:])
- return s2
- }
-
- r := s[:tot]
-
- if i+len(v) <= j {
- // Easy, as v fits in the deleted portion.
- copy(r[i:], v)
- if i+len(v) != j {
- copy(r[i+len(v):], s[j:])
- }
- clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
- return r
- }
-
- // We are expanding (v is bigger than j-i).
- // The situation is something like this:
- // (example has i=4,j=8,len(s)=16,len(v)=6)
- // s: aaaaxxxxbbbbbbbbyy
- // ^ ^ ^ ^
- // i j len(s) tot
- // a: prefix of s
- // x: deleted range
- // b: more of s
- // y: area to expand into
-
- if !overlaps(r[i+len(v):], v) {
- // Easy, as v is not clobbered by the first copy.
- copy(r[i+len(v):], s[j:])
- copy(r[i:], v)
- return r
- }
-
- // This is a situation where we don't have a single place to which
- // we can copy v. Parts of it need to go to two different places.
- // We want to copy the prefix of v into y and the suffix into x, then
- // rotate |y| spots to the right.
- //
- // v[2:] v[:2]
- // | |
- // s: aaaavvvvbbbbbbbbvv
- // ^ ^ ^ ^
- // i j len(s) tot
- //
- // If either of those two destinations don't alias v, then we're good.
- y := len(v) - (j - i) // length of y portion
-
- if !overlaps(r[i:j], v) {
- copy(r[i:j], v[y:])
- copy(r[len(s):], v[:y])
- rotateRight(r[i:], y)
- return r
- }
- if !overlaps(r[len(s):], v) {
- copy(r[len(s):], v[:y])
- copy(r[i:j], v[y:])
- rotateRight(r[i:], y)
- return r
- }
-
- // Now we know that v overlaps both x and y.
- // That means that the entirety of b is *inside* v.
- // So we don't need to preserve b at all; instead we
- // can copy v first, then copy the b part of v out of
- // v to the right destination.
- k := startIdx(v, s[j:])
- copy(r[i:], v)
- copy(r[i+len(v):], r[i+k:])
- return r
+ return slices.Replace(s, i, j, v...)
}
// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
func Clone[S ~[]E, E any](s S) S {
- // Preserve nil in case it matters.
- if s == nil {
- return nil
- }
- return append(S([]E{}), s...)
+ return slices.Clone(s)
}
// Compact replaces consecutive runs of equal elements with a single copy.
@@ -362,40 +122,14 @@ func Clone[S ~[]E, E any](s S) S {
// which may have a smaller length.
// Compact zeroes the elements between the new length and the original length.
func Compact[S ~[]E, E comparable](s S) S {
- if len(s) < 2 {
- return s
- }
- i := 1
- for k := 1; k < len(s); k++ {
- if s[k] != s[k-1] {
- if i != k {
- s[i] = s[k]
- }
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
+ return slices.Compact(s)
}
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
// CompactFunc zeroes the elements between the new length and the original length.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
- if len(s) < 2 {
- return s
- }
- i := 1
- for k := 1; k < len(s); k++ {
- if !eq(s[k], s[k-1]) {
- if i != k {
- s[i] = s[k]
- }
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
+ return slices.CompactFunc(s, eq)
}
// Grow increases the slice's capacity, if necessary, to guarantee space for
@@ -403,113 +137,15 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
func Grow[S ~[]E, E any](s S, n int) S {
- if n < 0 {
- panic("cannot be negative")
- }
- if n -= cap(s) - len(s); n > 0 {
- // TODO(https://go.dev/issue/53888): Make using []E instead of S
- // to workaround a compiler bug where the runtime.growslice optimization
- // does not take effect. Revert when the compiler is fixed.
- s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
- }
- return s
+ return slices.Grow(s, n)
}
// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
func Clip[S ~[]E, E any](s S) S {
- return s[:len(s):len(s)]
-}
-
-// Rotation algorithm explanation:
-//
-// rotate left by 2
-// start with
-// 0123456789
-// split up like this
-// 01 234567 89
-// swap first 2 and last 2
-// 89 234567 01
-// join first parts
-// 89234567 01
-// recursively rotate first left part by 2
-// 23456789 01
-// join at the end
-// 2345678901
-//
-// rotate left by 8
-// start with
-// 0123456789
-// split up like this
-// 01 234567 89
-// swap first 2 and last 2
-// 89 234567 01
-// join last parts
-// 89 23456701
-// recursively rotate second part left by 6
-// 89 01234567
-// join at the end
-// 8901234567
-
-// TODO: There are other rotate algorithms.
-// This algorithm has the desirable property that it moves each element exactly twice.
-// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
-// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
-
-// rotateLeft rotates b left by n spaces.
-// s_final[i] = s_orig[i+r], wrapping around.
-func rotateLeft[E any](s []E, r int) {
- for r != 0 && r != len(s) {
- if r*2 <= len(s) {
- swap(s[:r], s[len(s)-r:])
- s = s[:len(s)-r]
- } else {
- swap(s[:len(s)-r], s[r:])
- s, r = s[len(s)-r:], r*2-len(s)
- }
- }
-}
-func rotateRight[E any](s []E, r int) {
- rotateLeft(s, len(s)-r)
-}
-
-// swap swaps the contents of x and y. x and y must be equal length and disjoint.
-func swap[E any](x, y []E) {
- for i := 0; i < len(x); i++ {
- x[i], y[i] = y[i], x[i]
- }
-}
-
-// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
-func overlaps[E any](a, b []E) bool {
- if len(a) == 0 || len(b) == 0 {
- return false
- }
- elemSize := unsafe.Sizeof(a[0])
- if elemSize == 0 {
- return false
- }
- // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
- // Also see crypto/internal/alias/alias.go:AnyOverlap
- return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
- uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
-}
-
-// startIdx returns the index in haystack where the needle starts.
-// prerequisite: the needle must be aliased entirely inside the haystack.
-func startIdx[E any](haystack, needle []E) int {
- p := &needle[0]
- for i := range haystack {
- if p == &haystack[i] {
- return i
- }
- }
- // TODO: what if the overlap is by a non-integral number of Es?
- panic("needle not found")
+ return slices.Clip(s)
}
// Reverse reverses the elements of the slice in place.
func Reverse[S ~[]E, E any](s S) {
- for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
- s[i], s[j] = s[j], s[i]
- }
+ slices.Reverse(s)
}
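
With every exported function now a one-line wrapper, callers can migrate to the std library directly; a brief sketch (not from the diff, Go 1.21+):

package demo

import "slices"

func demo() {
	s := []int{1, 2, 4}
	s = slices.Insert(s, 2, 3) // [1 2 3 4]
	s = slices.Delete(s, 0, 1) // [2 3 4]; freed elements are zeroed
	slices.Reverse(s)          // [4 3 2]
	_ = s
}
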
diff --git a/tools/vendor/golang.org/x/exp/slices/sort.go b/tools/vendor/golang.org/x/exp/slices/sort.go
index f58bbc7ba..e270a7465 100644
--- a/tools/vendor/golang.org/x/exp/slices/sort.go
+++ b/tools/vendor/golang.org/x/exp/slices/sort.go
@@ -2,21 +2,20 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
-
package slices
import (
- "math/bits"
-
- "golang.org/x/exp/constraints"
+ "cmp"
+ "slices"
)
+// TODO(adonovan): add a "//go:fix inline" annotation to each function
+// in this file; see https://go.dev/issue/32816.
+
// Sort sorts a slice of any ordered type in ascending order.
// When sorting floating-point numbers, NaNs are ordered before other values.
-func Sort[S ~[]E, E constraints.Ordered](x S) {
- n := len(x)
- pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+func Sort[S ~[]E, E cmp.Ordered](x S) {
+ slices.Sort(x)
}
// SortFunc sorts the slice x in ascending order as determined by the cmp
@@ -29,118 +28,60 @@ func Sort[S ~[]E, E constraints.Ordered](x S) {
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
// To indicate 'uncomparable', return 0 from the function.
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
- n := len(x)
- pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
+ slices.SortFunc(x, cmp)
}
// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using cmp to compare elements in the same way as [SortFunc].
func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
- stableCmpFunc(x, len(x), cmp)
+ slices.SortStableFunc(x, cmp)
}
// IsSorted reports whether x is sorted in ascending order.
-func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
- for i := len(x) - 1; i > 0; i-- {
- if cmpLess(x[i], x[i-1]) {
- return false
- }
- }
- return true
+func IsSorted[S ~[]E, E cmp.Ordered](x S) bool {
+ return slices.IsSorted(x)
}
// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
// comparison function as defined by [SortFunc].
func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
- for i := len(x) - 1; i > 0; i-- {
- if cmp(x[i], x[i-1]) < 0 {
- return false
- }
- }
- return true
+ return slices.IsSortedFunc(x, cmp)
}
// Min returns the minimal value in x. It panics if x is empty.
// For floating-point numbers, Min propagates NaNs (any NaN value in x
// forces the output to be NaN).
-func Min[S ~[]E, E constraints.Ordered](x S) E {
- if len(x) < 1 {
- panic("slices.Min: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- m = min(m, x[i])
- }
- return m
+func Min[S ~[]E, E cmp.Ordered](x S) E {
+ return slices.Min(x)
}
// MinFunc returns the minimal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one minimal element
// according to the cmp function, MinFunc returns the first one.
func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
- if len(x) < 1 {
- panic("slices.MinFunc: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- if cmp(x[i], m) < 0 {
- m = x[i]
- }
- }
- return m
+ return slices.MinFunc(x, cmp)
}
// Max returns the maximal value in x. It panics if x is empty.
// For floating-point E, Max propagates NaNs (any NaN value in x
// forces the output to be NaN).
-func Max[S ~[]E, E constraints.Ordered](x S) E {
- if len(x) < 1 {
- panic("slices.Max: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- m = max(m, x[i])
- }
- return m
+func Max[S ~[]E, E cmp.Ordered](x S) E {
+ return slices.Max(x)
}
// MaxFunc returns the maximal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one maximal element
// according to the cmp function, MaxFunc returns the first one.
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
- if len(x) < 1 {
- panic("slices.MaxFunc: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- if cmp(x[i], m) > 0 {
- m = x[i]
- }
- }
- return m
+ return slices.MaxFunc(x, cmp)
}
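The NaN propagation documented for Min and Max is worth seeing concretely; a minimal sketch:

    package main

    import (
        "fmt"
        "math"
        "slices"
    )

    func main() {
        fmt.Println(slices.Min([]int{3, 1, 2})) // 1
        fmt.Println(slices.Max([]int{3, 1, 2})) // 3

        // Any NaN in the input forces a NaN result.
        fmt.Println(slices.Min([]float64{1, math.NaN(), 2})) // NaN
    }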
// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
-func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
- // Inlining is faster than calling BinarySearchFunc with a lambda.
- n := len(x)
- // Define x[-1] < target and x[n] >= target.
- // Invariant: x[i-1] < target, x[j] >= target.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if cmpLess(x[h], target) {
- i = h + 1 // preserves x[i-1] < target
- } else {
- j = h // preserves x[j] >= target
- }
- }
- // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
- return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
+func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) {
+ return slices.BinarySearch(x, target)
}
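A short sketch of the two BinarySearch outcomes described above, found versus insertion position:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        xs := []int{10, 20, 30, 40}

        i, ok := slices.BinarySearch(xs, 30)
        fmt.Println(i, ok) // 2 true

        // A missing target reports where it would be inserted.
        i, ok = slices.BinarySearch(xs, 25)
        fmt.Println(i, ok) // 2 false
    }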
// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
@@ -151,47 +92,5 @@ func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
// cmp must implement the same ordering as the slice, such that if
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
- n := len(x)
- // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
- // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if cmp(x[h], target) < 0 {
- i = h + 1 // preserves cmp(x[i - 1], target) < 0
- } else {
- j = h // preserves cmp(x[j], target) >= 0
- }
- }
- // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
- return i, i < n && cmp(x[i], target) == 0
-}
-
-type sortedHint int // hint for pdqsort when choosing the pivot
-
-const (
- unknownHint sortedHint = iota
- increasingHint
- decreasingHint
-)
-
-// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
-type xorshift uint64
-
-func (r *xorshift) Next() uint64 {
- *r ^= *r << 13
- *r ^= *r >> 17
- *r ^= *r << 5
- return uint64(*r)
-}
-
-func nextPowerOfTwo(length int) uint {
- return 1 << bits.Len(uint(length))
-}
-
-// isNaN reports whether x is a NaN without requiring the math package.
-// This will always return false if T is not floating-point.
-func isNaN[T constraints.Ordered](x T) bool {
- return x != x
+ return slices.BinarySearchFunc(x, target, cmp)
}
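The deleted isNaN helper relied on the IEEE-754 property that NaN is the only value unequal to itself, which lets generic code detect NaN without importing math. A sketch of the same trick against the stdlib cmp constraint:

    package main

    import (
        "cmp"
        "fmt"
        "math"
    )

    // isNaN reports whether x is a NaN. For non-floating-point
    // ordered types, x != x is always false, so this is safe generically.
    func isNaN[T cmp.Ordered](x T) bool {
        return x != x
    }

    func main() {
        fmt.Println(isNaN(math.NaN())) // true
        fmt.Println(isNaN(1.5))        // false
        fmt.Println(isNaN(42))         // false
    }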
diff --git a/tools/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/tools/vendor/golang.org/x/exp/slices/zsortanyfunc.go
deleted file mode 100644
index 06f2c7a24..000000000
--- a/tools/vendor/golang.org/x/exp/slices/zsortanyfunc.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Code generated by gen_sort_variants.go; DO NOT EDIT.
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-// insertionSortCmpFunc sorts data[a:b] using insertion sort.
-func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// siftDownCmpFunc implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
- child++
- }
- if !(cmp(data[first+root], data[first+child]) < 0) {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-
-func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownCmpFunc(data, i, hi, first, cmp)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDownCmpFunc(data, lo, i, first, cmp)
- }
-}
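The sift-down loop above uses the standard array-embedded binary heap layout, where the children of node i sit at indices 2i+1 and 2i+2. A tiny illustrative checker for that invariant (isMaxHeap is a hypothetical helper, not part of the package):

    package main

    import "fmt"

    // isMaxHeap reports whether data satisfies the max-heap property
    // under the children-at-2i+1-and-2i+2 layout used by siftDown.
    func isMaxHeap(data []int) bool {
        for i := range data {
            for _, c := range []int{2*i + 1, 2*i + 2} {
                if c < len(data) && data[i] < data[c] {
                    return false
                }
            }
        }
        return true
    }

    func main() {
        fmt.Println(isMaxHeap([]int{9, 5, 8, 1, 2})) // true
        fmt.Println(isMaxHeap([]int{1, 9, 8}))       // false
    }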
-
-// pdqsortCmpFunc sorts data[a:b].
-// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
-// C++ implementation: https://github.com/orlp/pdqsort
-// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
-// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
- const maxInsertion = 12
-
- var (
- wasBalanced = true // whether the last partitioning was reasonably balanced
- wasPartitioned = true // whether the slice was already partitioned
- )
-
- for {
- length := b - a
-
- if length <= maxInsertion {
- insertionSortCmpFunc(data, a, b, cmp)
- return
- }
-
- // Fall back to heapsort if too many bad choices were made.
- if limit == 0 {
- heapSortCmpFunc(data, a, b, cmp)
- return
- }
-
- // If the last partitioning was imbalanced, we need to break patterns.
- if !wasBalanced {
- breakPatternsCmpFunc(data, a, b, cmp)
- limit--
- }
-
- pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
- if hint == decreasingHint {
- reverseRangeCmpFunc(data, a, b, cmp)
- // The chosen pivot was pivot-a elements after the start of the array.
- // After reversing it is pivot-a elements before the end of the array.
- // The idea came from Rust's implementation.
- pivot = (b - 1) - (pivot - a)
- hint = increasingHint
- }
-
- // The slice is likely already sorted.
- if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortCmpFunc(data, a, b, cmp) {
- return
- }
- }
-
- // The slice probably contains many duplicate elements; partition it into
- // elements equal to and elements greater than the pivot.
- if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
- mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
- a = mid
- continue
- }
-
- mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
- wasPartitioned = alreadyPartitioned
-
- leftLen, rightLen := mid-a, b-mid
- balanceThreshold := length / 8
- if leftLen < rightLen {
- wasBalanced = leftLen >= balanceThreshold
- pdqsortCmpFunc(data, a, mid, limit, cmp)
- a = mid + 1
- } else {
- wasBalanced = rightLen >= balanceThreshold
- pdqsortCmpFunc(data, mid+1, b, limit, cmp)
- b = mid
- }
- }
-}
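The limit parameter is the bad-pivot budget: the deleted SortFunc seeded it with bits.Len(uint(n)), roughly log2(n)+1, and once it reaches zero the sort falls back to heapsort, bounding the worst case at O(n log n). A sketch of how that budget scales:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        for _, n := range []uint{1, 8, 1000, 1 << 20} {
            // Depth budget before pdqsort gives up and heapsorts.
            fmt.Printf("n=%-8d limit=%d\n", n, bits.Len(n))
        }
    }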
-
-// partitionCmpFunc does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && (cmp(data[i], data[a]) < 0) {
- i++
- }
- for i <= j && !(cmp(data[j], data[a]) < 0) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && (cmp(data[i], data[a]) < 0) {
- i++
- }
- for i <= j && !(cmp(data[j], data[a]) < 0) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumes that data[a:b] does not contain elements smaller than data[pivot].
-func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !(cmp(data[a], data[i]) < 0) {
- i++
- }
- for i <= j && (cmp(data[a], data[j]) < 0) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortCmpFunc partially sorts a slice and returns true if the slice is sorted at the end.
-func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !(cmp(data[i], data[i-1]) < 0) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !(cmp(data[j], data[j-1]) < 0) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !(cmp(data[j], data[j-1]) < 0) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotCmpFunc chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
- j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
- k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
- }
- // Find the median among i, j, k and store it into j.
- j = medianCmpFunc(data, i, j, k, &swaps, cmp)
- }
-
- switch swaps {
- case 0:
- return j, increasingHint
- case maxSwaps:
- return j, decreasingHint
- default:
- return j, unknownHint
- }
-}
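The order2/median chain above computes a median of three indices with at most three comparisons. A hedged sketch of the same idea on values rather than indices (median3 is a hypothetical name):

    package main

    import (
        "cmp"
        "fmt"
    )

    // median3 returns the median of a, b, c using three comparisons,
    // mirroring the order2/median chain in choosePivot.
    func median3[T cmp.Ordered](a, b, c T) T {
        if cmp.Compare(b, a) < 0 {
            a, b = b, a
        }
        if cmp.Compare(c, b) < 0 {
            b, c = c, b
        }
        if cmp.Compare(b, a) < 0 {
            a, b = b, a
        }
        return b
    }

    func main() {
        fmt.Println(median3(3, 1, 2)) // 2
        fmt.Println(median3(9, 9, 1)) // 9
    }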
-
-// order2CmpFunc returns x, y such that data[x] <= data[y], where (x, y) is (a, b) or (b, a).
-func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
- if cmp(data[b], data[a]) < 0 {
- *swaps++
- return b, a
- }
- return a, b
-}
-
-// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
- a, b = order2CmpFunc(data, a, b, swaps, cmp)
- b, c = order2CmpFunc(data, b, c, swaps, cmp)
- a, b = order2CmpFunc(data, a, b, swaps, cmp)
- return b
-}
-
-// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
-func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
- return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
-}
-
-func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- i := a
- j := b - 1
- for i < j {
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
-}
-
-func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
- for i := 0; i < n; i++ {
- data[a+i], data[b+i] = data[b+i], data[a+i]
- }
-}
-
-func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSortCmpFunc(data, a, b, cmp)
- a = b
- b += blockSize
- }
- insertionSortCmpFunc(data, a, n, cmp)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMergeCmpFunc(data, a, a+blockSize, b, cmp)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMergeCmpFunc(data, a, m, n, cmp)
- }
- blockSize *= 2
- }
-}
-
-// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-m. Without loss of generality, M < N.
-// The recursion depth is bounded by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if cmp(data[h], data[a]) < 0 {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data[k], data[k+1] = data[k+1], data[k]
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !(cmp(data[m], data[h]) < 0) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data[k], data[k-1] = data[k-1], data[k]
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !(cmp(data[p-c], data[c]) < 0) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotateCmpFunc(data, start, m, end, cmp)
- }
- if a < start && start < mid {
- symMergeCmpFunc(data, a, start, mid, cmp)
- }
- if mid < end && end < b {
- symMergeCmpFunc(data, mid, end, b, cmp)
- }
-}
-
-// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRangeCmpFunc(data, m-i, m, j, cmp)
- i -= j
- } else {
- swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
- j -= i
- }
- }
- // i == j
- swapRangeCmpFunc(data, m-i, m, i, cmp)
-}
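The deleted rotate helper achieves the 'x u v y' to 'x v u y' effect with block swaps; the same rotation can be sketched with the classic three-reversal formulation, shown here only to illustrate the effect (this is an alternative technique, not the deleted implementation):

    package main

    import (
        "fmt"
        "slices"
    )

    // rotate changes data[a:b] from "u v" (split at m) to "v u"
    // using three reversals instead of block swaps.
    func rotate[E any](data []E, a, m, b int) {
        slices.Reverse(data[a:m])
        slices.Reverse(data[m:b])
        slices.Reverse(data[a:b])
    }

    func main() {
        d := []int{0, 1, 2, 3, 4, 5}
        rotate(d, 1, 3, 5) // u = d[1:3], v = d[3:5]
        fmt.Println(d)     // [0 3 4 1 2 5]
    }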
diff --git a/tools/vendor/golang.org/x/exp/slices/zsortordered.go b/tools/vendor/golang.org/x/exp/slices/zsortordered.go
deleted file mode 100644
index 99b47c398..000000000
--- a/tools/vendor/golang.org/x/exp/slices/zsortordered.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Code generated by gen_sort_variants.go; DO NOT EDIT.
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// insertionSortOrdered sorts data[a:b] using insertion sort.
-func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// siftDownOrdered implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
- child++
- }
- if !cmpLess(data[first+root], data[first+child]) {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-
-func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownOrdered(data, i, hi, first)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDownOrdered(data, lo, i, first)
- }
-}
-
-// pdqsortOrdered sorts data[a:b].
-// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
-// C++ implementation: https://github.com/orlp/pdqsort
-// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
-// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
- const maxInsertion = 12
-
- var (
- wasBalanced = true // whether the last partitioning was reasonably balanced
- wasPartitioned = true // whether the slice was already partitioned
- )
-
- for {
- length := b - a
-
- if length <= maxInsertion {
- insertionSortOrdered(data, a, b)
- return
- }
-
- // Fall back to heapsort if too many bad choices were made.
- if limit == 0 {
- heapSortOrdered(data, a, b)
- return
- }
-
- // If the last partitioning was imbalanced, we need to break patterns.
- if !wasBalanced {
- breakPatternsOrdered(data, a, b)
- limit--
- }
-
- pivot, hint := choosePivotOrdered(data, a, b)
- if hint == decreasingHint {
- reverseRangeOrdered(data, a, b)
- // The chosen pivot was pivot-a elements after the start of the array.
- // After reversing it is pivot-a elements before the end of the array.
- // The idea came from Rust's implementation.
- pivot = (b - 1) - (pivot - a)
- hint = increasingHint
- }
-
- // The slice is likely already sorted.
- if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortOrdered(data, a, b) {
- return
- }
- }
-
- // The slice probably contains many duplicate elements; partition it into
- // elements equal to and elements greater than the pivot.
- if a > 0 && !cmpLess(data[a-1], data[pivot]) {
- mid := partitionEqualOrdered(data, a, b, pivot)
- a = mid
- continue
- }
-
- mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
- wasPartitioned = alreadyPartitioned
-
- leftLen, rightLen := mid-a, b-mid
- balanceThreshold := length / 8
- if leftLen < rightLen {
- wasBalanced = leftLen >= balanceThreshold
- pdqsortOrdered(data, a, mid, limit)
- a = mid + 1
- } else {
- wasBalanced = rightLen >= balanceThreshold
- pdqsortOrdered(data, mid+1, b, limit)
- b = mid
- }
- }
-}
-
-// partitionOrdered does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && cmpLess(data[i], data[a]) {
- i++
- }
- for i <= j && !cmpLess(data[j], data[a]) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && cmpLess(data[i], data[a]) {
- i++
- }
- for i <= j && !cmpLess(data[j], data[a]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumes that data[a:b] does not contain elements smaller than data[pivot].
-func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !cmpLess(data[a], data[i]) {
- i++
- }
- for i <= j && cmpLess(data[a], data[j]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortOrdered partially sorts a slice and returns true if the slice is sorted at the end.
-func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !cmpLess(data[i], data[i-1]) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !cmpLess(data[j], data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !cmpLess(data[j], data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotOrdered chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentOrdered(data, i, &swaps)
- j = medianAdjacentOrdered(data, j, &swaps)
- k = medianAdjacentOrdered(data, k, &swaps)
- }
- // Find the median among i, j, k and store it into j.
- j = medianOrdered(data, i, j, k, &swaps)
- }
-
- switch swaps {
- case 0:
- return j, increasingHint
- case maxSwaps:
- return j, decreasingHint
- default:
- return j, unknownHint
- }
-}
-
-// order2Ordered returns x, y such that data[x] <= data[y], where (x, y) is (a, b) or (b, a).
-func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
- if cmpLess(data[b], data[a]) {
- *swaps++
- return b, a
- }
- return a, b
-}
-
-// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
- a, b = order2Ordered(data, a, b, swaps)
- b, c = order2Ordered(data, b, c, swaps)
- a, b = order2Ordered(data, a, b, swaps)
- return b
-}
-
-// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
-func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
- return medianOrdered(data, a-1, a, a+1, swaps)
-}
-
-func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
- i := a
- j := b - 1
- for i < j {
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
-}
-
-func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
- for i := 0; i < n; i++ {
- data[a+i], data[b+i] = data[b+i], data[a+i]
- }
-}
-
-func stableOrdered[E constraints.Ordered](data []E, n int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSortOrdered(data, a, b)
- a = b
- b += blockSize
- }
- insertionSortOrdered(data, a, n)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMergeOrdered(data, a, a+blockSize, b)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMergeOrdered(data, a, m, n)
- }
- blockSize *= 2
- }
-}
-
-// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-m. Without loss of generality, M < N.
-// The recursion depth is bounded by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if cmpLess(data[h], data[a]) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data[k], data[k+1] = data[k+1], data[k]
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !cmpLess(data[m], data[h]) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data[k], data[k-1] = data[k-1], data[k]
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !cmpLess(data[p-c], data[c]) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotateOrdered(data, start, m, end)
- }
- if a < start && start < mid {
- symMergeOrdered(data, a, start, mid)
- }
- if mid < end && end < b {
- symMergeOrdered(data, mid, end, b)
- }
-}
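The hand-rolled binary searches inside symMerge (lowest i with data[i] >= data[a], and lowest i with data[i] > data[m]) follow the same invariant that sort.Search provides; a sketch of that correspondence:

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        data := []int{10, 20, 20, 30}

        // Lowest index i such that data[i] >= 20 (one-element left block case).
        lo := sort.Search(len(data), func(i int) bool { return data[i] >= 20 })
        // Lowest index i such that data[i] > 20 (one-element right block case).
        hi := sort.Search(len(data), func(i int) bool { return data[i] > 20 })

        fmt.Println(lo, hi) // 1 3
    }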
-
-// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRangeOrdered(data, m-i, m, j)
- i -= j
- } else {
- swapRangeOrdered(data, m-i, m+j-i, i)
- j -= i
- }
- }
- // i == j
- swapRangeOrdered(data, m-i, m, i)
-}
diff --git a/tools/vendor/golang.org/x/exp/slog/attr.go b/tools/vendor/golang.org/x/exp/slog/attr.go
deleted file mode 100644
index a180d0e1d..000000000
--- a/tools/vendor/golang.org/x/exp/slog/attr.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "fmt"
- "time"
-)
-
-// An Attr is a key-value pair.
-type Attr struct {
- Key string
- Value Value
-}
-
-// String returns an Attr for a string value.
-func String(key, value string) Attr {
- return Attr{key, StringValue(value)}
-}
-
-// Int64 returns an Attr for an int64.
-func Int64(key string, value int64) Attr {
- return Attr{key, Int64Value(value)}
-}
-
-// Int converts an int to an int64 and returns
-// an Attr with that value.
-func Int(key string, value int) Attr {
- return Int64(key, int64(value))
-}
-
-// Uint64 returns an Attr for a uint64.
-func Uint64(key string, v uint64) Attr {
- return Attr{key, Uint64Value(v)}
-}
-
-// Float64 returns an Attr for a floating-point number.
-func Float64(key string, v float64) Attr {
- return Attr{key, Float64Value(v)}
-}
-
-// Bool returns an Attr for a bool.
-func Bool(key string, v bool) Attr {
- return Attr{key, BoolValue(v)}
-}
-
-// Time returns an Attr for a time.Time.
-// It discards the monotonic portion.
-func Time(key string, v time.Time) Attr {
- return Attr{key, TimeValue(v)}
-}
-
-// Duration returns an Attr for a time.Duration.
-func Duration(key string, v time.Duration) Attr {
- return Attr{key, DurationValue(v)}
-}
-
-// Group returns an Attr for a Group Value.
-// The first argument is the key; the remaining arguments
-// are converted to Attrs as in [Logger.Log].
-//
-// Use Group to collect several key-value pairs under a single
-// key on a log line, or as the result of LogValue
-// in order to log a single value as multiple Attrs.
-func Group(key string, args ...any) Attr {
- return Attr{key, GroupValue(argsToAttrSlice(args)...)}
-}
-
-func argsToAttrSlice(args []any) []Attr {
- var (
- attr Attr
- attrs []Attr
- )
- for len(args) > 0 {
- attr, args = argsToAttr(args)
- attrs = append(attrs, attr)
- }
- return attrs
-}
-
-// Any returns an Attr for the supplied value.
-// See [Value.AnyValue] for how values are treated.
-func Any(key string, value any) Attr {
- return Attr{key, AnyValue(value)}
-}
-
-// Equal reports whether a and b have equal keys and values.
-func (a Attr) Equal(b Attr) bool {
- return a.Key == b.Key && a.Value.Equal(b.Value)
-}
-
-func (a Attr) String() string {
- return fmt.Sprintf("%s=%s", a.Key, a.Value)
-}
-
-// isEmpty reports whether a has an empty key and a nil value.
-// That can be written as Attr{} or Any("", nil).
-func (a Attr) isEmpty() bool {
- return a.Key == "" && a.Value.num == 0 && a.Value.any == nil
-}
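These constructors exist so common value kinds avoid an allocation. A small sketch of building and comparing Attrs, written against the stdlib log/slog package that this vendored copy mirrors:

    package main

    import (
        "fmt"
        "log/slog"
    )

    func main() {
        a := slog.Int("count", 3)
        b := slog.Group("request",
            "method", "GET",
            "path", "/healthz",
        )

        fmt.Println(a) // count=3
        fmt.Println(b) // e.g. request=[method=GET path=/healthz]
        fmt.Println(a.Equal(slog.Int("count", 3))) // true
    }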
diff --git a/tools/vendor/golang.org/x/exp/slog/doc.go b/tools/vendor/golang.org/x/exp/slog/doc.go
deleted file mode 100644
index 4beaf8674..000000000
--- a/tools/vendor/golang.org/x/exp/slog/doc.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package slog provides structured logging,
-in which log records include a message,
-a severity level, and various other attributes
-expressed as key-value pairs.
-
-It defines a type, [Logger],
-which provides several methods (such as [Logger.Info] and [Logger.Error])
-for reporting events of interest.
-
-Each Logger is associated with a [Handler].
-A Logger output method creates a [Record] from the method arguments
-and passes it to the Handler, which decides how to handle it.
-There is a default Logger accessible through top-level functions
-(such as [Info] and [Error]) that call the corresponding Logger methods.
-
-A log record consists of a time, a level, a message, and a set of key-value
-pairs, where the keys are strings and the values may be of any type.
-As an example,
-
- slog.Info("hello", "count", 3)
-
-creates a record containing the time of the call,
-a level of Info, the message "hello", and a single
-pair with key "count" and value 3.
-
-The [Info] top-level function calls the [Logger.Info] method on the default Logger.
-In addition to [Logger.Info], there are methods for Debug, Warn and Error levels.
-Besides these convenience methods for common levels,
-there is also a [Logger.Log] method which takes the level as an argument.
-Each of these methods has a corresponding top-level function that uses the
-default logger.
-
-The default handler formats the log record's message, time, level, and attributes
-as a string and passes it to the [log] package.
-
- 2022/11/08 15:28:26 INFO hello count=3
-
-For more control over the output format, create a logger with a different handler.
-This statement uses [New] to create a new logger with a TextHandler
-that writes structured records in text form to standard error:
-
- logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
-
-[TextHandler] output is a sequence of key=value pairs, easily and unambiguously
-parsed by machine. This statement:
-
- logger.Info("hello", "count", 3)
-
-produces this output:
-
- time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3
-
-The package also provides [JSONHandler], whose output is line-delimited JSON:
-
- logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
- logger.Info("hello", "count", 3)
-
-produces this output:
-
- {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3}
-
-Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions].
-There are options for setting the minimum level (see Levels, below),
-displaying the source file and line of the log call, and
-modifying attributes before they are logged.
-
-Setting a logger as the default with
-
- slog.SetDefault(logger)
-
-will cause the top-level functions like [Info] to use it.
-[SetDefault] also updates the default logger used by the [log] package,
-so that existing applications that use [log.Printf] and related functions
-will send log records to the logger's handler without needing to be rewritten.
-
-Some attributes are common to many log calls.
-For example, you may wish to include the URL or trace identifier of a server request
-with all log events arising from the request.
-Rather than repeat the attribute with every log call, you can use [Logger.With]
-to construct a new Logger containing the attributes:
-
- logger2 := logger.With("url", r.URL)
-
-The arguments to With are the same key-value pairs used in [Logger.Info].
-The result is a new Logger with the same handler as the original, but additional
-attributes that will appear in the output of every call.
-
-# Levels
-
-A [Level] is an integer representing the importance or severity of a log event.
-The higher the level, the more severe the event.
-This package defines constants for the most common levels,
-but any int can be used as a level.
-
-In an application, you may wish to log messages only at a certain level or greater.
-One common configuration is to log messages at Info or higher levels,
-suppressing debug logging until it is needed.
-The built-in handlers can be configured with the minimum level to output by
-setting [HandlerOptions.Level].
-The program's `main` function typically does this.
-The default value is LevelInfo.
-
-Setting the [HandlerOptions.Level] field to a [Level] value
-fixes the handler's minimum level throughout its lifetime.
-Setting it to a [LevelVar] allows the level to be varied dynamically.
-A LevelVar holds a Level and is safe to read or write from multiple
-goroutines.
-To vary the level dynamically for an entire program, first initialize
-a global LevelVar:
-
- var programLevel = new(slog.LevelVar) // Info by default
-
-Then use the LevelVar to construct a handler, and make it the default:
-
- h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel})
- slog.SetDefault(slog.New(h))
-
-Now the program can change its logging level with a single statement:
-
- programLevel.Set(slog.LevelDebug)
-
-# Groups
-
-Attributes can be collected into groups.
-A group has a name that is used to qualify the names of its attributes.
-How this qualification is displayed depends on the handler.
-[TextHandler] separates the group and attribute names with a dot.
-[JSONHandler] treats each group as a separate JSON object, with the group name as the key.
-
-Use [Group] to create a Group attribute from a name and a list of key-value pairs:
-
- slog.Group("request",
- "method", r.Method,
- "url", r.URL)
-
-TextHandler would display this group as
-
- request.method=GET request.url=http://example.com
-
-JSONHandler would display it as
-
- "request":{"method":"GET","url":"http://example.com"}
-
-Use [Logger.WithGroup] to qualify all of a Logger's output
-with a group name. Calling WithGroup on a Logger results in a
-new Logger with the same Handler as the original, but with all
-its attributes qualified by the group name.
-
-This can help prevent duplicate attribute keys in large systems,
-where subsystems might use the same keys.
-Pass each subsystem a different Logger with its own group name so that
-potential duplicates are qualified:
-
- logger := slog.Default().With("id", systemID)
- parserLogger := logger.WithGroup("parser")
- parseInput(input, parserLogger)
-
-When parseInput logs with parserLogger, its keys will be qualified with "parser",
-so even if it uses the common key "id", the log line will have distinct keys.
-
-# Contexts
-
-Some handlers may wish to include information from the [context.Context] that is
-available at the call site. One example of such information
-is the identifier for the current span when tracing is enabled.
-
-The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first
-argument, as do their corresponding top-level functions.
-
-Although the convenience methods on Logger (Info and so on) and the
-corresponding top-level functions do not take a context, the alternatives ending
-in "Context" do. For example,
-
- slog.InfoContext(ctx, "message")
-
-It is recommended to pass a context to an output method if one is available.
-
-# Attrs and Values
-
-An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as
-alternating keys and values. The statement
-
- slog.Info("hello", slog.Int("count", 3))
-
-behaves the same as
-
- slog.Info("hello", "count", 3)
-
-There are convenience constructors for [Attr] such as [Int], [String], and [Bool]
-for common types, as well as the function [Any] for constructing Attrs of any
-type.
-
-The value part of an Attr is a type called [Value].
-Like an [any], a Value can hold any Go value,
-but it can represent typical values, including all numbers and strings,
-without an allocation.
-
-For the most efficient log output, use [Logger.LogAttrs].
-It is similar to [Logger.Log] but accepts only Attrs, not alternating
-keys and values; this allows it, too, to avoid allocation.
-
-The call
-
- logger.LogAttrs(nil, slog.LevelInfo, "hello", slog.Int("count", 3))
-
-is the most efficient way to achieve the same output as
-
- slog.Info("hello", "count", 3)
-
-# Customizing a type's logging behavior
-
-If a type implements the [LogValuer] interface, the [Value] returned from its LogValue
-method is used for logging. You can use this to control how values of the type
-appear in logs. For example, you can redact secret information like passwords,
-or gather a struct's fields in a Group. See the examples under [LogValuer] for
-details.
-
-A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve]
-method handles these cases carefully, avoiding infinite loops and unbounded recursion.
-Handler authors and others may wish to use Value.Resolve instead of calling LogValue directly.
-
-# Wrapping output methods
-
-The logger functions inspect the call stack to find the file name
-and line number of the logging call within the application. This can produce
-incorrect source information for functions that wrap slog. For instance, if you
-define this function in file mylog.go:
-
- func Infof(format string, args ...any) {
- slog.Default().Info(fmt.Sprintf(format, args...))
- }
-
-and you call it like this in main.go:
-
- Infof("hello, %s", "world")
-
-then slog will report the source file as mylog.go, not main.go.
-
-A correct implementation of Infof will obtain the source location
-(pc) and pass it to NewRecord.
-The Infof function in the package-level example called "wrapping"
-demonstrates how to do this.
-
-# Working with Records
-
-Sometimes a Handler will need to modify a Record
-before passing it on to another Handler or backend.
-A Record contains a mixture of simple public fields (e.g. Time, Level, Message)
-and hidden fields that refer to state (such as attributes) indirectly. This
-means that modifying a simple copy of a Record (e.g. by calling
-[Record.Add] or [Record.AddAttrs] to add attributes)
-may have unexpected effects on the original.
-Before modifying a Record, use [Clone] to
-create a copy that shares no state with the original,
-or create a new Record with [NewRecord]
-and build up its Attrs by traversing the old ones with [Record.Attrs].
-
-# Performance considerations
-
-If profiling your application demonstrates that logging is taking significant time,
-the following suggestions may help.
-
-If many log lines have a common attribute, use [Logger.With] to create a Logger with
-that attribute. The built-in handlers will format that attribute only once, at the
-call to [Logger.With]. The [Handler] interface is designed to allow that optimization,
-and a well-written Handler should take advantage of it.
-
-The arguments to a log call are always evaluated, even if the log event is discarded.
-If possible, defer computation so that it happens only if the value is actually logged.
-For example, consider the call
-
- slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily
-
-The URL.String method will be called even if the logger discards Info-level events.
-Instead, pass the URL directly:
-
- slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed
-
-The built-in [TextHandler] will call its String method, but only
-if the log event is enabled.
-Avoiding the call to String also preserves the structure of the underlying value.
-For example [JSONHandler] emits the components of the parsed URL as a JSON object.
-If you want to avoid eagerly paying the cost of the String call
-without causing the handler to potentially inspect the structure of the value,
-wrap the value in a fmt.Stringer implementation that hides its Marshal methods.
-
-You can also use the [LogValuer] interface to avoid unnecessary work in disabled log
-calls. Say you need to log some expensive value:
-
- slog.Debug("frobbing", "value", computeExpensiveValue(arg))
-
-Even if this line is disabled, computeExpensiveValue will be called.
-To avoid that, define a type implementing LogValuer:
-
- type expensive struct { arg int }
-
- func (e expensive) LogValue() slog.Value {
- return slog.AnyValue(computeExpensiveValue(e.arg))
- }
-
-Then use a value of that type in log calls:
-
- slog.Debug("frobbing", "value", expensive{arg})
-
-Now computeExpensiveValue will only be called when the line is enabled.
-
-The built-in handlers acquire a lock before calling [io.Writer.Write]
-to ensure that each record is written in one piece. User-defined
-handlers are responsible for their own locking.
-*/
-package slog
diff --git a/tools/vendor/golang.org/x/exp/slog/handler.go b/tools/vendor/golang.org/x/exp/slog/handler.go
deleted file mode 100644
index bd635cb81..000000000
--- a/tools/vendor/golang.org/x/exp/slog/handler.go
+++ /dev/null
@@ -1,577 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "context"
- "fmt"
- "io"
- "reflect"
- "strconv"
- "sync"
- "time"
-
- "golang.org/x/exp/slices"
- "golang.org/x/exp/slog/internal/buffer"
-)
-
-// A Handler handles log records produced by a Logger.
-//
-// A typical handler may print log records to standard error,
-// or write them to a file or database, or perhaps augment them
-// with additional attributes and pass them on to another handler.
-//
-// Any of the Handler's methods may be called concurrently with itself
-// or with other methods. It is the responsibility of the Handler to
-// manage this concurrency.
-//
-// Users of the slog package should not invoke Handler methods directly.
-// They should use the methods of [Logger] instead.
-type Handler interface {
- // Enabled reports whether the handler handles records at the given level.
- // The handler ignores records whose level is lower.
- // It is called early, before any arguments are processed,
- // to save effort if the log event should be discarded.
- // If called from a Logger method, the first argument is the context
- // passed to that method, or context.Background() if nil was passed
- // or the method does not take a context.
- // The context is passed so Enabled can use its values
- // to make a decision.
- Enabled(context.Context, Level) bool
-
- // Handle handles the Record.
- // It will only be called when Enabled returns true.
- // The Context argument is as for Enabled.
- // It is present solely to provide Handlers access to the context's values.
- // Canceling the context should not affect record processing.
- // (Among other things, log messages may be necessary to debug a
- // cancellation-related problem.)
- //
- // Handle methods that produce output should observe the following rules:
- // - If r.Time is the zero time, ignore the time.
- // - If r.PC is zero, ignore it.
- // - Attr's values should be resolved.
- // - If an Attr's key and value are both the zero value, ignore the Attr.
- // This can be tested with attr.Equal(Attr{}).
- // - If a group's key is empty, inline the group's Attrs.
- // - If a group has no Attrs (even if it has a non-empty key),
- // ignore it.
- Handle(context.Context, Record) error
-
- // WithAttrs returns a new Handler whose attributes consist of
- // both the receiver's attributes and the arguments.
- // The Handler owns the slice: it may retain, modify or discard it.
- WithAttrs(attrs []Attr) Handler
-
- // WithGroup returns a new Handler with the given group appended to
- // the receiver's existing groups.
- // The keys of all subsequent attributes, whether added by With or in a
- // Record, should be qualified by the sequence of group names.
- //
- // How this qualification happens is up to the Handler, so long as
- // this Handler's attribute keys differ from those of another Handler
- // with a different sequence of group names.
- //
- // A Handler should treat WithGroup as starting a Group of Attrs that ends
- // at the end of the log event. That is,
- //
- // logger.WithGroup("s").LogAttrs(level, msg, slog.Int("a", 1), slog.Int("b", 2))
- //
- // should behave like
- //
- // logger.LogAttrs(level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2)))
- //
- // If the name is empty, WithGroup returns the receiver.
- WithGroup(name string) Handler
-}
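The contract above matches the standard library's log/slog.Handler. As a rough illustration only, here is a deliberately minimal Handler sketch against the stdlib API (lineHandler is a hypothetical name; it flattens attributes and does not handle group-valued Attrs, so it is not a contract-complete implementation):

    package main

    import (
        "context"
        "fmt"
        "log/slog"
        "os"
    )

    // lineHandler is a minimal sketch of a Handler: one line per record.
    type lineHandler struct {
        prefix string      // open group names, dot-joined
        attrs  []slog.Attr // attrs from WithAttrs, keys already qualified
    }

    func (h *lineHandler) Enabled(_ context.Context, l slog.Level) bool {
        return l >= slog.LevelInfo
    }

    func (h *lineHandler) Handle(_ context.Context, r slog.Record) error {
        fmt.Fprintf(os.Stderr, "%v %s", r.Level, r.Message)
        for _, a := range h.attrs {
            fmt.Fprintf(os.Stderr, " %s=%v", a.Key, a.Value.Resolve())
        }
        r.Attrs(func(a slog.Attr) bool {
            fmt.Fprintf(os.Stderr, " %s%s=%v", h.prefix, a.Key, a.Value.Resolve())
            return true
        })
        fmt.Fprintln(os.Stderr)
        return nil
    }

    func (h *lineHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
        h2 := *h
        h2.attrs = append([]slog.Attr(nil), h.attrs...)
        for _, a := range attrs {
            a.Key = h.prefix + a.Key // qualify with the open groups
            h2.attrs = append(h2.attrs, a)
        }
        return &h2
    }

    func (h *lineHandler) WithGroup(name string) slog.Handler {
        if name == "" {
            return h // per the contract above
        }
        h2 := *h
        h2.prefix = h.prefix + name + "."
        return &h2
    }

    func main() {
        slog.New(&lineHandler{}).WithGroup("req").Info("hello", "count", 3)
        // Writes: INFO hello req.count=3
    }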
-
-type defaultHandler struct {
- ch *commonHandler
- // log.Output, except for testing
- output func(calldepth int, message string) error
-}
-
-func newDefaultHandler(output func(int, string) error) *defaultHandler {
- return &defaultHandler{
- ch: &commonHandler{json: false},
- output: output,
- }
-}
-
-func (*defaultHandler) Enabled(_ context.Context, l Level) bool {
- return l >= LevelInfo
-}
-
-// Collect the level, attributes and message in a string and
-// write it with the default log.Logger.
-// Let the log.Logger handle time and file/line.
-func (h *defaultHandler) Handle(ctx context.Context, r Record) error {
- buf := buffer.New()
- buf.WriteString(r.Level.String())
- buf.WriteByte(' ')
- buf.WriteString(r.Message)
- state := h.ch.newHandleState(buf, true, " ", nil)
- defer state.free()
- state.appendNonBuiltIns(r)
-
- // skip [h.output, defaultHandler.Handle, handlerWriter.Write, log.Output]
- return h.output(4, buf.String())
-}
-
-func (h *defaultHandler) WithAttrs(as []Attr) Handler {
- return &defaultHandler{h.ch.withAttrs(as), h.output}
-}
-
-func (h *defaultHandler) WithGroup(name string) Handler {
- return &defaultHandler{h.ch.withGroup(name), h.output}
-}
-
-// HandlerOptions are options for a TextHandler or JSONHandler.
-// A zero HandlerOptions consists entirely of default values.
-type HandlerOptions struct {
- // AddSource causes the handler to compute the source code position
- // of the log statement and add a SourceKey attribute to the output.
- AddSource bool
-
- // Level reports the minimum record level that will be logged.
- // The handler discards records with lower levels.
- // If Level is nil, the handler assumes LevelInfo.
- // The handler calls Level.Level for each record processed;
- // to adjust the minimum level dynamically, use a LevelVar.
- Level Leveler
-
- // ReplaceAttr is called to rewrite each non-group attribute before it is logged.
- // The attribute's value has been resolved (see [Value.Resolve]).
- // If ReplaceAttr returns an Attr with Key == "", the attribute is discarded.
- //
- // The built-in attributes with keys "time", "level", "source", and "msg"
- // are passed to this function, except that time is omitted
- // if zero, and source is omitted if AddSource is false.
- //
- // The first argument is a list of currently open groups that contain the
- // Attr. It must not be retained or modified. ReplaceAttr is never called
- // for Group attributes, only their contents. For example, the attribute
- // list
- //
- // Int("a", 1), Group("g", Int("b", 2)), Int("c", 3)
- //
- // results in consecutive calls to ReplaceAttr with the following arguments:
- //
- // nil, Int("a", 1)
- // []string{"g"}, Int("b", 2)
- // nil, Int("c", 3)
- //
- // ReplaceAttr can be used to change the default keys of the built-in
- // attributes, convert types (for example, to replace a `time.Time` with the
- // integer seconds since the Unix epoch), sanitize personal information, or
- // remove attributes from the output.
- ReplaceAttr func(groups []string, a Attr) Attr
-}
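ReplaceAttr is typically used for key renames and redaction. A sketch against the stdlib log/slog API (the "password" attribute is illustrative):

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        h := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
            ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
                switch a.Key {
                case slog.TimeKey:
                    return slog.Attr{} // empty key: attribute is discarded
                case "password":
                    return slog.String(a.Key, "REDACTED")
                }
                return a
            },
        })
        slog.New(h).Info("login", "user", "al", "password", "hunter2")
        // Output: level=INFO msg=login user=al password=REDACTED
    }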
-
-// Keys for "built-in" attributes.
-const (
- // TimeKey is the key used by the built-in handlers for the time
- // when the log method is called. The associated Value is a [time.Time].
- TimeKey = "time"
- // LevelKey is the key used by the built-in handlers for the level
- // of the log call. The associated value is a [Level].
- LevelKey = "level"
- // MessageKey is the key used by the built-in handlers for the
- // message of the log call. The associated value is a string.
- MessageKey = "msg"
- // SourceKey is the key used by the built-in handlers for the source file
- // and line of the log call. The associated value is a string.
- SourceKey = "source"
-)
-
-type commonHandler struct {
- json bool // true => output JSON; false => output text
- opts HandlerOptions
- preformattedAttrs []byte
- groupPrefix string // for text: prefix of groups opened in preformatting
- groups []string // all groups started from WithGroup
- nOpenGroups int // the number of groups opened in preformattedAttrs
- mu sync.Mutex
- w io.Writer
-}
-
-func (h *commonHandler) clone() *commonHandler {
- // We can't use assignment because we can't copy the mutex.
- return &commonHandler{
- json: h.json,
- opts: h.opts,
- preformattedAttrs: slices.Clip(h.preformattedAttrs),
- groupPrefix: h.groupPrefix,
- groups: slices.Clip(h.groups),
- nOpenGroups: h.nOpenGroups,
- w: h.w,
- }
-}
-
-// enabled reports whether l is greater than or equal to the
-// minimum level.
-func (h *commonHandler) enabled(l Level) bool {
- minLevel := LevelInfo
- if h.opts.Level != nil {
- minLevel = h.opts.Level.Level()
- }
- return l >= minLevel
-}
-
-func (h *commonHandler) withAttrs(as []Attr) *commonHandler {
- h2 := h.clone()
- // Pre-format the attributes as an optimization.
- prefix := buffer.New()
- defer prefix.Free()
- prefix.WriteString(h.groupPrefix)
- state := h2.newHandleState((*buffer.Buffer)(&h2.preformattedAttrs), false, "", prefix)
- defer state.free()
- if len(h2.preformattedAttrs) > 0 {
- state.sep = h.attrSep()
- }
- state.openGroups()
- for _, a := range as {
- state.appendAttr(a)
- }
- // Remember the new prefix for later keys.
- h2.groupPrefix = state.prefix.String()
- // Remember how many opened groups are in preformattedAttrs,
- // so we don't open them again when we handle a Record.
- h2.nOpenGroups = len(h2.groups)
- return h2
-}
-
-func (h *commonHandler) withGroup(name string) *commonHandler {
- if name == "" {
- return h
- }
- h2 := h.clone()
- h2.groups = append(h2.groups, name)
- return h2
-}
-
-func (h *commonHandler) handle(r Record) error {
- state := h.newHandleState(buffer.New(), true, "", nil)
- defer state.free()
- if h.json {
- state.buf.WriteByte('{')
- }
- // Built-in attributes. They are not in a group.
- stateGroups := state.groups
- state.groups = nil // So ReplaceAttr sees no groups instead of the preformatted groups.
- rep := h.opts.ReplaceAttr
- // time
- if !r.Time.IsZero() {
- key := TimeKey
- val := r.Time.Round(0) // strip monotonic to match Attr behavior
- if rep == nil {
- state.appendKey(key)
- state.appendTime(val)
- } else {
- state.appendAttr(Time(key, val))
- }
- }
- // level
- key := LevelKey
- val := r.Level
- if rep == nil {
- state.appendKey(key)
- state.appendString(val.String())
- } else {
- state.appendAttr(Any(key, val))
- }
- // source
- if h.opts.AddSource {
- state.appendAttr(Any(SourceKey, r.source()))
- }
- key = MessageKey
- msg := r.Message
- if rep == nil {
- state.appendKey(key)
- state.appendString(msg)
- } else {
- state.appendAttr(String(key, msg))
- }
- state.groups = stateGroups // Restore groups passed to ReplaceAttr.
- state.appendNonBuiltIns(r)
- state.buf.WriteByte('\n')
-
- h.mu.Lock()
- defer h.mu.Unlock()
- _, err := h.w.Write(*state.buf)
- return err
-}
-
-func (s *handleState) appendNonBuiltIns(r Record) {
- // preformatted Attrs
- if len(s.h.preformattedAttrs) > 0 {
- s.buf.WriteString(s.sep)
- s.buf.Write(s.h.preformattedAttrs)
- s.sep = s.h.attrSep()
- }
- // Attrs in Record -- unlike the built-in ones, they are in groups started
- // from WithGroup.
- s.prefix = buffer.New()
- defer s.prefix.Free()
- s.prefix.WriteString(s.h.groupPrefix)
- s.openGroups()
- r.Attrs(func(a Attr) bool {
- s.appendAttr(a)
- return true
- })
- if s.h.json {
- // Close all open groups.
- for range s.h.groups {
- s.buf.WriteByte('}')
- }
- // Close the top-level object.
- s.buf.WriteByte('}')
- }
-}
-
-// attrSep returns the separator between attributes.
-func (h *commonHandler) attrSep() string {
- if h.json {
- return ","
- }
- return " "
-}
-
-// handleState holds state for a single call to commonHandler.handle.
-// The initial value of sep determines whether a separator is written
-// before the next key; after the first key, sep is always the handler's
-// attribute separator.
-type handleState struct {
- h *commonHandler
- buf *buffer.Buffer
- freeBuf bool // should buf be freed?
- sep string // separator to write before next key
- prefix *buffer.Buffer // for text: key prefix
- groups *[]string // pool-allocated slice of active groups, for ReplaceAttr
-}
-
-var groupPool = sync.Pool{New: func() any {
- s := make([]string, 0, 10)
- return &s
-}}
-
-func (h *commonHandler) newHandleState(buf *buffer.Buffer, freeBuf bool, sep string, prefix *buffer.Buffer) handleState {
- s := handleState{
- h: h,
- buf: buf,
- freeBuf: freeBuf,
- sep: sep,
- prefix: prefix,
- }
- if h.opts.ReplaceAttr != nil {
- s.groups = groupPool.Get().(*[]string)
- *s.groups = append(*s.groups, h.groups[:h.nOpenGroups]...)
- }
- return s
-}
-
-func (s *handleState) free() {
- if s.freeBuf {
- s.buf.Free()
- }
- if gs := s.groups; gs != nil {
- *gs = (*gs)[:0]
- groupPool.Put(gs)
- }
-}
-
-func (s *handleState) openGroups() {
- for _, n := range s.h.groups[s.h.nOpenGroups:] {
- s.openGroup(n)
- }
-}
-
-// Separator for group names and keys.
-const keyComponentSep = '.'
-
-// openGroup starts a new group of attributes
-// with the given name.
-func (s *handleState) openGroup(name string) {
- if s.h.json {
- s.appendKey(name)
- s.buf.WriteByte('{')
- s.sep = ""
- } else {
- s.prefix.WriteString(name)
- s.prefix.WriteByte(keyComponentSep)
- }
- // Collect group names for ReplaceAttr.
- if s.groups != nil {
- *s.groups = append(*s.groups, name)
- }
-}
-
-// closeGroup ends the group with the given name.
-func (s *handleState) closeGroup(name string) {
- if s.h.json {
- s.buf.WriteByte('}')
- } else {
- (*s.prefix) = (*s.prefix)[:len(*s.prefix)-len(name)-1 /* for keyComponentSep */]
- }
- s.sep = s.h.attrSep()
- if s.groups != nil {
- *s.groups = (*s.groups)[:len(*s.groups)-1]
- }
-}
-
-// appendAttr appends the Attr's key and value.
-// It handles replacement and elides the Attr if it is empty
-// (after replacement).
-func (s *handleState) appendAttr(a Attr) {
- if rep := s.h.opts.ReplaceAttr; rep != nil && a.Value.Kind() != KindGroup {
- var gs []string
- if s.groups != nil {
- gs = *s.groups
- }
- // Resolve before calling ReplaceAttr, so the user doesn't have to.
- a.Value = a.Value.Resolve()
- a = rep(gs, a)
- }
- a.Value = a.Value.Resolve()
- // Elide empty Attrs.
- if a.isEmpty() {
- return
- }
- // Special case: Source.
- if v := a.Value; v.Kind() == KindAny {
- if src, ok := v.Any().(*Source); ok {
- if s.h.json {
- a.Value = src.group()
- } else {
- a.Value = StringValue(fmt.Sprintf("%s:%d", src.File, src.Line))
- }
- }
- }
- if a.Value.Kind() == KindGroup {
- attrs := a.Value.Group()
- // Output only non-empty groups.
- if len(attrs) > 0 {
- // Inline a group with an empty key.
- if a.Key != "" {
- s.openGroup(a.Key)
- }
- for _, aa := range attrs {
- s.appendAttr(aa)
- }
- if a.Key != "" {
- s.closeGroup(a.Key)
- }
- }
- } else {
- s.appendKey(a.Key)
- s.appendValue(a.Value)
- }
-}
-
-func (s *handleState) appendError(err error) {
- s.appendString(fmt.Sprintf("!ERROR:%v", err))
-}
-
-func (s *handleState) appendKey(key string) {
- s.buf.WriteString(s.sep)
- if s.prefix != nil {
- // TODO: optimize by avoiding allocation.
- s.appendString(string(*s.prefix) + key)
- } else {
- s.appendString(key)
- }
- if s.h.json {
- s.buf.WriteByte(':')
- } else {
- s.buf.WriteByte('=')
- }
- s.sep = s.h.attrSep()
-}
-
-func (s *handleState) appendString(str string) {
- if s.h.json {
- s.buf.WriteByte('"')
- *s.buf = appendEscapedJSONString(*s.buf, str)
- s.buf.WriteByte('"')
- } else {
- // text
- if needsQuoting(str) {
- *s.buf = strconv.AppendQuote(*s.buf, str)
- } else {
- s.buf.WriteString(str)
- }
- }
-}
-
-func (s *handleState) appendValue(v Value) {
- defer func() {
- if r := recover(); r != nil {
- // If it panics with a nil pointer, the most likely cases are
- // an encoding.TextMarshaler or error fails to guard against nil,
- // in which case "" seems to be the feasible choice.
- //
- // Adapted from the code in fmt/print.go.
- if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() {
- s.appendString("")
- return
- }
-
- // Otherwise just print the original panic message.
- s.appendString(fmt.Sprintf("!PANIC: %v", r))
- }
- }()
-
- var err error
- if s.h.json {
- err = appendJSONValue(s, v)
- } else {
- err = appendTextValue(s, v)
- }
- if err != nil {
- s.appendError(err)
- }
-}
-
-func (s *handleState) appendTime(t time.Time) {
- if s.h.json {
- appendJSONTime(s, t)
- } else {
- writeTimeRFC3339Millis(s.buf, t)
- }
-}
-
-// This takes half the time of Time.AppendFormat.
-func writeTimeRFC3339Millis(buf *buffer.Buffer, t time.Time) {
- year, month, day := t.Date()
- buf.WritePosIntWidth(year, 4)
- buf.WriteByte('-')
- buf.WritePosIntWidth(int(month), 2)
- buf.WriteByte('-')
- buf.WritePosIntWidth(day, 2)
- buf.WriteByte('T')
- hour, min, sec := t.Clock()
- buf.WritePosIntWidth(hour, 2)
- buf.WriteByte(':')
- buf.WritePosIntWidth(min, 2)
- buf.WriteByte(':')
- buf.WritePosIntWidth(sec, 2)
- ns := t.Nanosecond()
- buf.WriteByte('.')
- buf.WritePosIntWidth(ns/1e6, 3)
- _, offsetSeconds := t.Zone()
- if offsetSeconds == 0 {
- buf.WriteByte('Z')
- } else {
- offsetMinutes := offsetSeconds / 60
- if offsetMinutes < 0 {
- buf.WriteByte('-')
- offsetMinutes = -offsetMinutes
- } else {
- buf.WriteByte('+')
- }
- buf.WritePosIntWidth(offsetMinutes/60, 2)
- buf.WriteByte(':')
- buf.WritePosIntWidth(offsetMinutes%60, 2)
- }
-}
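-
-// A sketch (not in the original file) of a check that could live in a
-// handler_test.go file: the hand-rolled writer should agree byte-for-byte
-// with the equivalent stdlib layout. Assumes "testing" is imported.
-func TestWriteTimeRFC3339MillisSketch(t *testing.T) {
- tm := time.Date(2023, 7, 4, 12, 30, 45, 987_000_000, time.FixedZone("", -4*60*60))
- buf := buffer.New()
- defer buf.Free()
- writeTimeRFC3339Millis(buf, tm)
- if got, want := buf.String(), tm.Format("2006-01-02T15:04:05.000Z07:00"); got != want {
- t.Errorf("got %q, want %q", got, want)
- }
-}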
diff --git a/tools/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go b/tools/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go
deleted file mode 100644
index 7786c166e..000000000
--- a/tools/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package buffer provides a pool-allocated byte buffer.
-package buffer
-
-import (
- "sync"
-)
-
-// Buffer adapted from go/src/fmt/print.go
-type Buffer []byte
-
-// Having an initial size gives a dramatic speedup.
-var bufPool = sync.Pool{
- New: func() any {
- b := make([]byte, 0, 1024)
- return (*Buffer)(&b)
- },
-}
-
-func New() *Buffer {
- return bufPool.Get().(*Buffer)
-}
-
-func (b *Buffer) Free() {
- // To reduce peak allocation, return only smaller buffers to the pool.
- const maxBufferSize = 16 << 10
- if cap(*b) <= maxBufferSize {
- *b = (*b)[:0]
- bufPool.Put(b)
- }
-}
-
-func (b *Buffer) Reset() {
- *b = (*b)[:0]
-}
-
-func (b *Buffer) Write(p []byte) (int, error) {
- *b = append(*b, p...)
- return len(p), nil
-}
-
-func (b *Buffer) WriteString(s string) {
- *b = append(*b, s...)
-}
-
-func (b *Buffer) WriteByte(c byte) {
- *b = append(*b, c)
-}
-
-func (b *Buffer) WritePosInt(i int) {
- b.WritePosIntWidth(i, 0)
-}
-
-// WritePosIntWidth writes non-negative integer i to the buffer, padded on the left
-// by zeroes to the given width. Use a width of 0 to omit padding.
-func (b *Buffer) WritePosIntWidth(i, width int) {
- // Cheap integer to fixed-width decimal ASCII.
- // Copied from log/log.go.
-
- if i < 0 {
- panic("negative int")
- }
-
- // Assemble decimal in reverse order.
- var bb [20]byte
- bp := len(bb) - 1
- for i >= 10 || width > 1 {
- width--
- q := i / 10
- bb[bp] = byte('0' + i - q*10)
- bp--
- i = q
- }
- // i < 10
- bb[bp] = byte('0' + i)
- b.Write(bb[bp:])
-}
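-
-// A sketch (not in the original file) of a test that could sit alongside
-// this file: WritePosIntWidth zero-pads on the left, and width is a
-// minimum, never a cap. Assumes "testing" is imported.
-func TestWritePosIntWidthSketch(t *testing.T) {
- b := New()
- defer b.Free()
- b.WritePosIntWidth(7, 3) // "007"
- b.WritePosIntWidth(2024, 2) // "2024": wider values are never truncated
- if got, want := b.String(), "0072024"; got != want {
- t.Errorf("got %q, want %q", got, want)
- }
-}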
-
-func (b *Buffer) String() string {
- return string(*b)
-}
diff --git a/tools/vendor/golang.org/x/exp/slog/internal/ignorepc.go b/tools/vendor/golang.org/x/exp/slog/internal/ignorepc.go
deleted file mode 100644
index d1256426f..000000000
--- a/tools/vendor/golang.org/x/exp/slog/internal/ignorepc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package internal
-
-// If IgnorePC is true, do not invoke runtime.Callers to get the pc.
-// This is solely for benchmarking the slowdown from runtime.Callers.
-var IgnorePC = false
diff --git a/tools/vendor/golang.org/x/exp/slog/json_handler.go b/tools/vendor/golang.org/x/exp/slog/json_handler.go
deleted file mode 100644
index 157ada869..000000000
--- a/tools/vendor/golang.org/x/exp/slog/json_handler.go
+++ /dev/null
@@ -1,336 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "strconv"
- "time"
- "unicode/utf8"
-
- "golang.org/x/exp/slog/internal/buffer"
-)
-
-// JSONHandler is a Handler that writes Records to an io.Writer as
-// line-delimited JSON objects.
-type JSONHandler struct {
- *commonHandler
-}
-
-// NewJSONHandler creates a JSONHandler that writes to w,
-// using the given options.
-// If opts is nil, the default options are used.
-func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler {
- if opts == nil {
- opts = &HandlerOptions{}
- }
- return &JSONHandler{
- &commonHandler{
- json: true,
- w: w,
- opts: *opts,
- },
- }
-}
-
-// Enabled reports whether the handler handles records at the given level.
-// The handler ignores records whose level is lower.
-func (h *JSONHandler) Enabled(_ context.Context, level Level) bool {
- return h.commonHandler.enabled(level)
-}
-
-// WithAttrs returns a new JSONHandler whose attributes consist
-// of h's attributes followed by attrs.
-func (h *JSONHandler) WithAttrs(attrs []Attr) Handler {
- return &JSONHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
-}
-
-func (h *JSONHandler) WithGroup(name string) Handler {
- return &JSONHandler{commonHandler: h.commonHandler.withGroup(name)}
-}
-
-// Handle formats its argument Record as a JSON object on a single line.
-//
-// If the Record's time is zero, the time is omitted.
-// Otherwise, the key is "time"
-// and the value is output as with json.Marshal.
-//
-// If the Record's level is zero, the level is omitted.
-// Otherwise, the key is "level"
-// and the value of [Level.String] is output.
-//
-// If the AddSource option is set and source information is available,
-// the key is "source"
-// and the value is output as "FILE:LINE".
-//
-// The message's key is "msg".
-//
-// To modify these or other attributes, or remove them from the output, use
-// [HandlerOptions.ReplaceAttr].
-//
-// Values are formatted as with an [encoding/json.Encoder] with SetEscapeHTML(false),
-// with two exceptions.
-//
-// First, an Attr whose Value is of type error is formatted as a string, by
-// calling its Error method. Only errors in Attrs receive this special treatment,
-// not errors embedded in structs, slices, maps or other data structures that
-// are processed by the encoding/json package.
-//
-// Second, an encoding failure does not cause Handle to return an error.
-// Instead, the error message is formatted as a string.
-//
-// Each call to Handle results in a single serialized call to io.Writer.Write.
-func (h *JSONHandler) Handle(_ context.Context, r Record) error {
- return h.commonHandler.handle(r)
-}
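-
-// A usage sketch (not in the original file) that could live in
-// json_handler_test.go. ReplaceAttr drops the built-in time attribute so
-// the output is deterministic. Assumes "os" is imported.
-func ExampleJSONHandler() {
- h := NewJSONHandler(os.Stdout, &HandlerOptions{
- ReplaceAttr: func(groups []string, a Attr) Attr {
- if len(groups) == 0 && a.Key == TimeKey {
- return Attr{} // empty Attrs are elided entirely
- }
- return a
- },
- })
- New(h).Info("hello", "count", 3)
- // Output: {"level":"INFO","msg":"hello","count":3}
-}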
-
-// Adapted from time.Time.MarshalJSON to avoid allocation.
-func appendJSONTime(s *handleState, t time.Time) {
- if y := t.Year(); y < 0 || y >= 10000 {
- // RFC 3339 is clear that years are 4 digits exactly.
- // See golang.org/issue/4556#c15 for more discussion.
- s.appendError(errors.New("time.Time year outside of range [0,9999]"))
- }
- s.buf.WriteByte('"')
- *s.buf = t.AppendFormat(*s.buf, time.RFC3339Nano)
- s.buf.WriteByte('"')
-}
-
-func appendJSONValue(s *handleState, v Value) error {
- switch v.Kind() {
- case KindString:
- s.appendString(v.str())
- case KindInt64:
- *s.buf = strconv.AppendInt(*s.buf, v.Int64(), 10)
- case KindUint64:
- *s.buf = strconv.AppendUint(*s.buf, v.Uint64(), 10)
- case KindFloat64:
- // json.Marshal is funny about floats; it doesn't
- // always match strconv.AppendFloat. So just call it.
- // That's expensive, but floats are rare.
- if err := appendJSONMarshal(s.buf, v.Float64()); err != nil {
- return err
- }
- case KindBool:
- *s.buf = strconv.AppendBool(*s.buf, v.Bool())
- case KindDuration:
- // Do what json.Marshal does.
- *s.buf = strconv.AppendInt(*s.buf, int64(v.Duration()), 10)
- case KindTime:
- s.appendTime(v.Time())
- case KindAny:
- a := v.Any()
- _, jm := a.(json.Marshaler)
- if err, ok := a.(error); ok && !jm {
- s.appendString(err.Error())
- } else {
- return appendJSONMarshal(s.buf, a)
- }
- default:
- panic(fmt.Sprintf("bad kind: %s", v.Kind()))
- }
- return nil
-}
-
-func appendJSONMarshal(buf *buffer.Buffer, v any) error {
- // Use a json.Encoder to avoid escaping HTML.
- var bb bytes.Buffer
- enc := json.NewEncoder(&bb)
- enc.SetEscapeHTML(false)
- if err := enc.Encode(v); err != nil {
- return err
- }
- bs := bb.Bytes()
- buf.Write(bs[:len(bs)-1]) // remove final newline
- return nil
-}
-
-// appendEscapedJSONString escapes s for JSON and appends it to buf.
-// It does not surround the string in quotation marks.
-//
-// Modified from encoding/json/encode.go:encodeState.string,
-// with escapeHTML set to false.
-func appendEscapedJSONString(buf []byte, s string) []byte {
- char := func(b byte) { buf = append(buf, b) }
- str := func(s string) { buf = append(buf, s...) }
-
- start := 0
- for i := 0; i < len(s); {
- if b := s[i]; b < utf8.RuneSelf {
- if safeSet[b] {
- i++
- continue
- }
- if start < i {
- str(s[start:i])
- }
- char('\\')
- switch b {
- case '\\', '"':
- char(b)
- case '\n':
- char('n')
- case '\r':
- char('r')
- case '\t':
- char('t')
- default:
- // This encodes bytes < 0x20 except for \t, \n and \r.
- str(`u00`)
- char(hex[b>>4])
- char(hex[b&0xF])
- }
- i++
- start = i
- continue
- }
- c, size := utf8.DecodeRuneInString(s[i:])
- if c == utf8.RuneError && size == 1 {
- if start < i {
- str(s[start:i])
- }
- str(`\ufffd`)
- i += size
- start = i
- continue
- }
- // U+2028 is LINE SEPARATOR.
- // U+2029 is PARAGRAPH SEPARATOR.
- // They are both technically valid characters in JSON strings,
- // but don't work in JSONP, which has to be evaluated as JavaScript,
- // and can lead to security holes there. It is valid JSON to
- // escape them, so we do so unconditionally.
- // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
- if c == '\u2028' || c == '\u2029' {
- if start < i {
- str(s[start:i])
- }
- str(`\u202`)
- char(hex[c&0xF])
- i += size
- start = i
- continue
- }
- i += size
- }
- if start < len(s) {
- str(s[start:])
- }
- return buf
-}
-
-var hex = "0123456789abcdef"
-
-// Copied from encoding/json/tables.go.
-//
-// safeSet holds the value true if the ASCII character with the given array
-// position can be represented inside a JSON string without any further
-// escaping.
-//
-// All values are true except for the ASCII control characters (0-31), the
-// double quote ("), and the backslash character ("\").
-var safeSet = [utf8.RuneSelf]bool{
- ' ': true,
- '!': true,
- '"': false,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '(': true,
- ')': true,
- '*': true,
- '+': true,
- ',': true,
- '-': true,
- '.': true,
- '/': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- ':': true,
- ';': true,
- '<': true,
- '=': true,
- '>': true,
- '?': true,
- '@': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'V': true,
- 'W': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '[': true,
- '\\': false,
- ']': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '{': true,
- '|': true,
- '}': true,
- '~': true,
- '\u007f': true,
-}
diff --git a/tools/vendor/golang.org/x/exp/slog/level.go b/tools/vendor/golang.org/x/exp/slog/level.go
deleted file mode 100644
index b2365f0aa..000000000
--- a/tools/vendor/golang.org/x/exp/slog/level.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "errors"
- "fmt"
- "strconv"
- "strings"
- "sync/atomic"
-)
-
-// A Level is the importance or severity of a log event.
-// The higher the level, the more important or severe the event.
-type Level int
-
-// Level numbers are inherently arbitrary,
-// but we picked them to satisfy three constraints.
-// Any system can map them to another numbering scheme if it wishes.
-//
-// First, we wanted the default level to be Info. Since Levels are ints, Info
-// is the default value for int, zero.
-//
-// Second, we wanted to make it easy to use levels to specify logger verbosity.
-// Since a larger level means a more severe event, a logger that accepts events
-// with smaller (or more negative) level means a more verbose logger. Logger
-// verbosity is thus the negation of event severity, and the default verbosity
-// of 0 accepts all events at least as severe as INFO.
-//
-// Third, we wanted some room between levels to accommodate schemes with named
-// levels between ours. For example, Google Cloud Logging defines a Notice level
-// between Info and Warn. Since there are only a few of these intermediate
-// levels, the gap between the numbers need not be large. Our gap of 4 matches
-// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the
-// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog
-// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog
-// does not. But those OpenTelemetry levels can still be represented as slog
-// Levels by using the appropriate integers.
-//
-// Names for common levels.
-const (
- LevelDebug Level = -4
- LevelInfo Level = 0
- LevelWarn Level = 4
- LevelError Level = 8
-)
-
-// String returns a name for the level.
-// If the level has a name, then that name
-// in uppercase is returned.
-// If the level is between named values, then
-// an integer is appended to the uppercased name.
-// Examples:
-//
-// LevelWarn.String() => "WARN"
-// (LevelInfo+2).String() => "INFO+2"
-func (l Level) String() string {
- str := func(base string, val Level) string {
- if val == 0 {
- return base
- }
- return fmt.Sprintf("%s%+d", base, val)
- }
-
- switch {
- case l < LevelInfo:
- return str("DEBUG", l-LevelDebug)
- case l < LevelWarn:
- return str("INFO", l-LevelInfo)
- case l < LevelError:
- return str("WARN", l-LevelWarn)
- default:
- return str("ERROR", l-LevelError)
- }
-}
-
-// MarshalJSON implements [encoding/json.Marshaler]
-// by quoting the output of [Level.String].
-func (l Level) MarshalJSON() ([]byte, error) {
- // AppendQuote is sufficient for JSON-encoding all Level strings.
- // They don't contain any runes that would produce invalid JSON
- // when escaped.
- return strconv.AppendQuote(nil, l.String()), nil
-}
-
-// UnmarshalJSON implements [encoding/json.Unmarshaler].
-// It accepts any string produced by [Level.MarshalJSON],
-// ignoring case.
-// It also accepts numeric offsets that would result in a different string on
-// output. For example, "Error-8" would marshal as "INFO".
-func (l *Level) UnmarshalJSON(data []byte) error {
- s, err := strconv.Unquote(string(data))
- if err != nil {
- return err
- }
- return l.parse(s)
-}
-
-// MarshalText implements [encoding.TextMarshaler]
-// by calling [Level.String].
-func (l Level) MarshalText() ([]byte, error) {
- return []byte(l.String()), nil
-}
-
-// UnmarshalText implements [encoding.TextUnmarshaler].
-// It accepts any string produced by [Level.MarshalText],
-// ignoring case.
-// It also accepts numeric offsets that would result in a different string on
-// output. For example, "Error-8" would marshal as "INFO".
-func (l *Level) UnmarshalText(data []byte) error {
- return l.parse(string(data))
-}
-
-func (l *Level) parse(s string) (err error) {
- defer func() {
- if err != nil {
- err = fmt.Errorf("slog: level string %q: %w", s, err)
- }
- }()
-
- name := s
- offset := 0
- if i := strings.IndexAny(s, "+-"); i >= 0 {
- name = s[:i]
- offset, err = strconv.Atoi(s[i:])
- if err != nil {
- return err
- }
- }
- switch strings.ToUpper(name) {
- case "DEBUG":
- *l = LevelDebug
- case "INFO":
- *l = LevelInfo
- case "WARN":
- *l = LevelWarn
- case "ERROR":
- *l = LevelError
- default:
- return errors.New("unknown name")
- }
- *l += Level(offset)
- return nil
-}
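-
-// A sketch (not in the original file) of the offset handling above: a name
-// plus a signed offset parses to the shifted Level, which may re-marshal
-// under a different name.
-func ExampleLevel_UnmarshalText() {
- var l Level
- _ = l.UnmarshalText([]byte("Error-8")) // LevelError - 8 == LevelInfo
- fmt.Println(l)
- _ = l.UnmarshalText([]byte("warn+2")) // names are case-insensitive
- fmt.Println(l)
- // Output:
- // INFO
- // WARN+2
-}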
-
-// Level returns the receiver.
-// It implements Leveler.
-func (l Level) Level() Level { return l }
-
-// A LevelVar is a Level variable, to allow a Handler level to change
-// dynamically.
-// It implements Leveler as well as a Set method,
-// and it is safe for use by multiple goroutines.
-// The zero LevelVar corresponds to LevelInfo.
-type LevelVar struct {
- val atomic.Int64
-}
-
-// Level returns v's level.
-func (v *LevelVar) Level() Level {
- return Level(int(v.val.Load()))
-}
-
-// Set sets v's level to l.
-func (v *LevelVar) Set(l Level) {
- v.val.Store(int64(l))
-}
-
-func (v *LevelVar) String() string {
- return fmt.Sprintf("LevelVar(%s)", v.Level())
-}
-
-// MarshalText implements [encoding.TextMarshaler]
-// by calling [Level.MarshalText].
-func (v *LevelVar) MarshalText() ([]byte, error) {
- return v.Level().MarshalText()
-}
-
-// UnmarshalText implements [encoding.TextUnmarshaler]
-// by calling [Level.UnmarshalText].
-func (v *LevelVar) UnmarshalText(data []byte) error {
- var l Level
- if err := l.UnmarshalText(data); err != nil {
- return err
- }
- v.Set(l)
- return nil
-}
-
-// A Leveler provides a Level value.
-//
-// As Level itself implements Leveler, clients typically supply
-// a Level value wherever a Leveler is needed, such as in HandlerOptions.
-// Clients who need to vary the level dynamically can provide a more complex
-// Leveler implementation such as *LevelVar.
-type Leveler interface {
- Level() Level
-}
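-
-// A sketch (not in the original file) of the dynamic-level pattern LevelVar
-// exists for: one handler, verbosity changed at runtime. Assumes "os" is
-// imported; NewTextHandler is defined elsewhere in this package.
-func ExampleLevelVar() {
- var lv LevelVar // the zero value means LevelInfo
- logger := New(NewTextHandler(os.Stderr, &HandlerOptions{Level: &lv}))
- logger.Debug("dropped") // below the current minimum, not emitted
- lv.Set(LevelDebug)
- logger.Debug("now emitted") // same logger, no rebuild needed
-}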
diff --git a/tools/vendor/golang.org/x/exp/slog/logger.go b/tools/vendor/golang.org/x/exp/slog/logger.go
deleted file mode 100644
index e87ec9936..000000000
--- a/tools/vendor/golang.org/x/exp/slog/logger.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "context"
- "log"
- "runtime"
- "sync/atomic"
- "time"
-
- "golang.org/x/exp/slog/internal"
-)
-
-var defaultLogger atomic.Value
-
-func init() {
- defaultLogger.Store(New(newDefaultHandler(log.Output)))
-}
-
-// Default returns the default Logger.
-func Default() *Logger { return defaultLogger.Load().(*Logger) }
-
-// SetDefault makes l the default Logger.
-// After this call, output from the log package's default Logger
-// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler.
-func SetDefault(l *Logger) {
- defaultLogger.Store(l)
- // If the default's handler is a defaultHandler, then don't use a handlerWriter,
- // or we'll deadlock as they both try to acquire the log default mutex.
- // The defaultHandler will use whatever the log default writer is currently
- // set to, which is correct.
- // This can occur with SetDefault(Default()).
- // See TestSetDefault.
- if _, ok := l.Handler().(*defaultHandler); !ok {
- capturePC := log.Flags()&(log.Lshortfile|log.Llongfile) != 0
- log.SetOutput(&handlerWriter{l.Handler(), LevelInfo, capturePC})
- log.SetFlags(0) // we want just the log message, no time or location
- }
-}
-
-// handlerWriter is an io.Writer that calls a Handler.
-// It is used to link the default log.Logger to the default slog.Logger.
-type handlerWriter struct {
- h Handler
- level Level
- capturePC bool
-}
-
-func (w *handlerWriter) Write(buf []byte) (int, error) {
- if !w.h.Enabled(context.Background(), w.level) {
- return 0, nil
- }
- var pc uintptr
- if !internal.IgnorePC && w.capturePC {
- // skip [runtime.Callers, w.Write, Logger.Output, log.Print]
- var pcs [1]uintptr
- runtime.Callers(4, pcs[:])
- pc = pcs[0]
- }
-
- // Remove final newline.
- origLen := len(buf) // Report that the entire buf was written.
- if len(buf) > 0 && buf[len(buf)-1] == '\n' {
- buf = buf[:len(buf)-1]
- }
- r := NewRecord(time.Now(), w.level, string(buf), pc)
- return origLen, w.h.Handle(context.Background(), r)
-}
-
-// A Logger records structured information about each call to its
-// Log, Debug, Info, Warn, and Error methods.
-// For each call, it creates a Record and passes it to a Handler.
-//
-// To create a new Logger, call [New] or a Logger method
-// that begins "With".
-type Logger struct {
- handler Handler // for structured logging
-}
-
-func (l *Logger) clone() *Logger {
- c := *l
- return &c
-}
-
-// Handler returns l's Handler.
-func (l *Logger) Handler() Handler { return l.handler }
-
-// With returns a new Logger that includes the given arguments, converted to
-// Attrs as in [Logger.Log].
-// The Attrs will be added to each output from the Logger.
-// The new Logger shares the old Logger's context.
-// The new Logger's handler is the result of calling WithAttrs on the receiver's
-// handler.
-func (l *Logger) With(args ...any) *Logger {
- c := l.clone()
- c.handler = l.handler.WithAttrs(argsToAttrSlice(args))
- return c
-}
-
-// WithGroup returns a new Logger that starts a group. The keys of all
-// attributes added to the Logger will be qualified by the given name.
-// (How that qualification happens depends on the [Handler.WithGroup]
-// method of the Logger's Handler.)
-// The new Logger shares the old Logger's context.
-//
-// The new Logger's handler is the result of calling WithGroup on the receiver's
-// handler.
-func (l *Logger) WithGroup(name string) *Logger {
- c := l.clone()
- c.handler = l.handler.WithGroup(name)
- return c
-}
-
-// New creates a new Logger with the given non-nil Handler and a nil context.
-func New(h Handler) *Logger {
- if h == nil {
- panic("nil Handler")
- }
- return &Logger{handler: h}
-}
-
-// With calls Logger.With on the default logger.
-func With(args ...any) *Logger {
- return Default().With(args...)
-}
-
-// Enabled reports whether l emits log records at the given context and level.
-func (l *Logger) Enabled(ctx context.Context, level Level) bool {
- if ctx == nil {
- ctx = context.Background()
- }
- return l.Handler().Enabled(ctx, level)
-}
-
-// NewLogLogger returns a new log.Logger such that each call to its Output method
-// dispatches a Record to the specified handler. The logger acts as a bridge from
-// the older log API to newer structured logging handlers.
-func NewLogLogger(h Handler, level Level) *log.Logger {
- return log.New(&handlerWriter{h, level, true}, "", 0)
-}
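-
-// A sketch (not in the original file) of the bridge in the other direction:
-// handing a *log.Logger backed by a structured handler to code that only
-// knows the classic log API. Assumes "os" is imported.
-func ExampleNewLogLogger() {
- std := NewLogLogger(NewTextHandler(os.Stderr, nil), LevelWarn)
- std.Print("disk almost full") // emitted as a WARN record with this message
-}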
-
-// Log emits a log record with the current time and the given level and message.
-// The Record's Attrs consist of the Logger's attributes followed by
-// the Attrs specified by args.
-//
-// The attribute arguments are processed as follows:
-// - If an argument is an Attr, it is used as is.
-// - If an argument is a string and this is not the last argument,
-// the following argument is treated as the value and the two are combined
-// into an Attr.
-// - Otherwise, the argument is treated as a value with key "!BADKEY".
-func (l *Logger) Log(ctx context.Context, level Level, msg string, args ...any) {
- l.log(ctx, level, msg, args...)
-}
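-
-// A sketch (not in the original file) of the argument rules above. Assumes
-// "context" and "os" are imported; Int is the Attr constructor defined
-// elsewhere in this package.
-func ExampleLogger_Log() {
- logger := New(NewTextHandler(os.Stderr, nil))
- logger.Log(context.Background(), LevelInfo, "open",
- "path", "/tmp/x", // a string key followed by a value -> one Attr
- Int("attempt", 3), // an Attr argument is used as is
- 42, // a value with no preceding key -> !BADKEY=42
- )
-}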
-
-// LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs.
-func (l *Logger) LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
- l.logAttrs(ctx, level, msg, attrs...)
-}
-
-// Debug logs at LevelDebug.
-func (l *Logger) Debug(msg string, args ...any) {
- l.log(nil, LevelDebug, msg, args...)
-}
-
-// DebugContext logs at LevelDebug with the given context.
-func (l *Logger) DebugContext(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelDebug, msg, args...)
-}
-
-// DebugCtx logs at LevelDebug with the given context.
-// Deprecated: Use Logger.DebugContext.
-func (l *Logger) DebugCtx(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelDebug, msg, args...)
-}
-
-// Info logs at LevelInfo.
-func (l *Logger) Info(msg string, args ...any) {
- l.log(nil, LevelInfo, msg, args...)
-}
-
-// InfoContext logs at LevelInfo with the given context.
-func (l *Logger) InfoContext(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelInfo, msg, args...)
-}
-
-// InfoCtx logs at LevelInfo with the given context.
-// Deprecated: Use Logger.InfoContext.
-func (l *Logger) InfoCtx(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelInfo, msg, args...)
-}
-
-// Warn logs at LevelWarn.
-func (l *Logger) Warn(msg string, args ...any) {
- l.log(nil, LevelWarn, msg, args...)
-}
-
-// WarnContext logs at LevelWarn with the given context.
-func (l *Logger) WarnContext(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelWarn, msg, args...)
-}
-
-// WarnCtx logs at LevelWarn with the given context.
-// Deprecated: Use Logger.WarnContext.
-func (l *Logger) WarnCtx(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelWarn, msg, args...)
-}
-
-// Error logs at LevelError.
-func (l *Logger) Error(msg string, args ...any) {
- l.log(nil, LevelError, msg, args...)
-}
-
-// ErrorContext logs at LevelError with the given context.
-func (l *Logger) ErrorContext(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelError, msg, args...)
-}
-
-// ErrorCtx logs at LevelError with the given context.
-// Deprecated: Use Logger.ErrorContext.
-func (l *Logger) ErrorCtx(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelError, msg, args...)
-}
-
-// log is the low-level logging method for methods that take ...any.
-// It must always be called directly by an exported logging method
-// or function, because it uses a fixed call depth to obtain the pc.
-func (l *Logger) log(ctx context.Context, level Level, msg string, args ...any) {
- if !l.Enabled(ctx, level) {
- return
- }
- var pc uintptr
- if !internal.IgnorePC {
- var pcs [1]uintptr
- // skip [runtime.Callers, this function, this function's caller]
- runtime.Callers(3, pcs[:])
- pc = pcs[0]
- }
- r := NewRecord(time.Now(), level, msg, pc)
- r.Add(args...)
- if ctx == nil {
- ctx = context.Background()
- }
- _ = l.Handler().Handle(ctx, r)
-}
-
-// logAttrs is like [Logger.log], but for methods that take ...Attr.
-func (l *Logger) logAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
- if !l.Enabled(ctx, level) {
- return
- }
- var pc uintptr
- if !internal.IgnorePC {
- var pcs [1]uintptr
- // skip [runtime.Callers, this function, this function's caller]
- runtime.Callers(3, pcs[:])
- pc = pcs[0]
- }
- r := NewRecord(time.Now(), level, msg, pc)
- r.AddAttrs(attrs...)
- if ctx == nil {
- ctx = context.Background()
- }
- _ = l.Handler().Handle(ctx, r)
-}
-
-// Debug calls Logger.Debug on the default logger.
-func Debug(msg string, args ...any) {
- Default().log(nil, LevelDebug, msg, args...)
-}
-
-// DebugContext calls Logger.DebugContext on the default logger.
-func DebugContext(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelDebug, msg, args...)
-}
-
-// Info calls Logger.Info on the default logger.
-func Info(msg string, args ...any) {
- Default().log(nil, LevelInfo, msg, args...)
-}
-
-// InfoContext calls Logger.InfoContext on the default logger.
-func InfoContext(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelInfo, msg, args...)
-}
-
-// Warn calls Logger.Warn on the default logger.
-func Warn(msg string, args ...any) {
- Default().log(nil, LevelWarn, msg, args...)
-}
-
-// WarnContext calls Logger.WarnContext on the default logger.
-func WarnContext(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelWarn, msg, args...)
-}
-
-// Error calls Logger.Error on the default logger.
-func Error(msg string, args ...any) {
- Default().log(nil, LevelError, msg, args...)
-}
-
-// ErrorContext calls Logger.ErrorContext on the default logger.
-func ErrorContext(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelError, msg, args...)
-}
-
-// DebugCtx calls Logger.DebugContext on the default logger.
-// Deprecated: call DebugContext.
-func DebugCtx(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelDebug, msg, args...)
-}
-
-// InfoCtx calls Logger.InfoContext on the default logger.
-// Deprecated: call InfoContext.
-func InfoCtx(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelInfo, msg, args...)
-}
-
-// WarnCtx calls Logger.WarnContext on the default logger.
-// Deprecated: call WarnContext.
-func WarnCtx(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelWarn, msg, args...)
-}
-
-// ErrorCtx calls Logger.ErrorContext on the default logger.
-// Deprecated: call ErrorContext.
-func ErrorCtx(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelError, msg, args...)
-}
-
-// Log calls Logger.Log on the default logger.
-func Log(ctx context.Context, level Level, msg string, args ...any) {
- Default().log(ctx, level, msg, args...)
-}
-
-// LogAttrs calls Logger.LogAttrs on the default logger.
-func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
- Default().logAttrs(ctx, level, msg, attrs...)
-}
diff --git a/tools/vendor/golang.org/x/exp/slog/noplog.bench b/tools/vendor/golang.org/x/exp/slog/noplog.bench
deleted file mode 100644
index ed9296ff6..000000000
--- a/tools/vendor/golang.org/x/exp/slog/noplog.bench
+++ /dev/null
@@ -1,36 +0,0 @@
-goos: linux
-goarch: amd64
-pkg: golang.org/x/exp/slog
-cpu: Intel(R) Xeon(R) CPU @ 2.20GHz
-BenchmarkNopLog/attrs-8 1000000 1090 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-8 1000000 1097 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-8 1000000 1078 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-8 1000000 1095 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-8 1000000 1096 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-parallel-8 4007268 308.2 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-parallel-8 4016138 299.7 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-parallel-8 4020529 305.9 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-parallel-8 3977829 303.4 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-parallel-8 3225438 318.5 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/keys-values-8 1179256 994.2 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/keys-values-8 1000000 1002 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/keys-values-8 1216710 993.2 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/keys-values-8 1000000 1013 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/keys-values-8 1000000 1016 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-8 989066 1163 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-8 994116 1163 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-8 1000000 1152 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-8 991675 1165 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-8 965268 1166 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8 3955503 303.3 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8 3861188 307.8 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8 3967752 303.9 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8 3955203 302.7 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8 3948278 301.1 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/Ctx-8 940622 1247 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/Ctx-8 936381 1257 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/Ctx-8 959730 1266 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/Ctx-8 943473 1290 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/Ctx-8 919414 1259 ns/op 0 B/op 0 allocs/op
-PASS
-ok golang.org/x/exp/slog 40.566s
diff --git a/tools/vendor/golang.org/x/exp/slog/record.go b/tools/vendor/golang.org/x/exp/slog/record.go
deleted file mode 100644
index 38b3440f7..000000000
--- a/tools/vendor/golang.org/x/exp/slog/record.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "runtime"
- "time"
-
- "golang.org/x/exp/slices"
-)
-
-const nAttrsInline = 5
-
-// A Record holds information about a log event.
-// Copies of a Record share state.
-// Do not modify a Record after handing out a copy of it.
-// Use [Record.Clone] to create a copy with no shared state.
-type Record struct {
- // The time at which the output method (Log, Info, etc.) was called.
- Time time.Time
-
- // The log message.
- Message string
-
- // The level of the event.
- Level Level
-
- // The program counter at the time the record was constructed, as determined
- // by runtime.Callers. If zero, no program counter is available.
- //
- // The only valid use for this value is as an argument to
- // [runtime.CallersFrames]. In particular, it must not be passed to
- // [runtime.FuncForPC].
- PC uintptr
-
- // Allocation optimization: an inline array sized to hold
- // the majority of log calls (based on examination of open-source
- // code). It holds the start of the list of Attrs.
- front [nAttrsInline]Attr
-
- // The number of Attrs in front.
- nFront int
-
- // The list of Attrs except for those in front.
- // Invariants:
- // - len(back) > 0 iff nFront == len(front)
- // - Unused array elements are zero. Used to detect mistakes.
- back []Attr
-}
-
-// NewRecord creates a Record from the given arguments.
-// Use [Record.AddAttrs] to add attributes to the Record.
-//
-// NewRecord is intended for logging APIs that want to support a [Handler] as
-// a backend.
-func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record {
- return Record{
- Time: t,
- Message: msg,
- Level: level,
- PC: pc,
- }
-}
-
-// Clone returns a copy of the record with no shared state.
-// The original record and the clone can both be modified
-// without interfering with each other.
-func (r Record) Clone() Record {
- r.back = slices.Clip(r.back) // prevent append from mutating shared array
- return r
-}
-
-// NumAttrs returns the number of attributes in the Record.
-func (r Record) NumAttrs() int {
- return r.nFront + len(r.back)
-}
-
-// Attrs calls f on each Attr in the Record.
-// Iteration stops if f returns false.
-func (r Record) Attrs(f func(Attr) bool) {
- for i := 0; i < r.nFront; i++ {
- if !f(r.front[i]) {
- return
- }
- }
- for _, a := range r.back {
- if !f(a) {
- return
- }
- }
-}
-
-// AddAttrs appends the given Attrs to the Record's list of Attrs.
-func (r *Record) AddAttrs(attrs ...Attr) {
- n := copy(r.front[r.nFront:], attrs)
- r.nFront += n
- // Check if a copy was modified by slicing past the end
- // and seeing if the Attr there is non-zero.
- if cap(r.back) > len(r.back) {
- end := r.back[:len(r.back)+1][len(r.back)]
- if !end.isEmpty() {
- panic("copies of a slog.Record were both modified")
- }
- }
- r.back = append(r.back, attrs[n:]...)
-}
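-
-// A sketch (not in the original file) of the hazard the panic above guards
-// against: two copies of a Record that share back would silently clobber
-// each other's appends, so mutate only clones.
-//
-// r2 := r // shares r.back once front is full
-// r3 := r.Clone() // safe: Clone clips back, so appends reallocate
-// r3.AddAttrs(Int("safe", 1))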
-
-// Add converts the args to Attrs as described in [Logger.Log],
-// then appends the Attrs to the Record's list of Attrs.
-func (r *Record) Add(args ...any) {
- var a Attr
- for len(args) > 0 {
- a, args = argsToAttr(args)
- if r.nFront < len(r.front) {
- r.front[r.nFront] = a
- r.nFront++
- } else {
- if r.back == nil {
- r.back = make([]Attr, 0, countAttrs(args))
- }
- r.back = append(r.back, a)
- }
- }
-}
-
-// countAttrs returns the number of Attrs that would be created from args.
-func countAttrs(args []any) int {
- n := 0
- for i := 0; i < len(args); i++ {
- n++
- if _, ok := args[i].(string); ok {
- i++
- }
- }
- return n
-}
-
-const badKey = "!BADKEY"
-
-// argsToAttr turns a prefix of the nonempty args slice into an Attr
-// and returns the unconsumed portion of the slice.
-// If args[0] is an Attr, it returns it.
-// If args[0] is a string, it treats the first two elements as
-// a key-value pair.
-// Otherwise, it treats args[0] as a value with a missing key.
-func argsToAttr(args []any) (Attr, []any) {
- switch x := args[0].(type) {
- case string:
- if len(args) == 1 {
- return String(badKey, x), nil
- }
- return Any(x, args[1]), args[2:]
-
- case Attr:
- return x, args[1:]
-
- default:
- return Any(badKey, x), args[1:]
- }
-}
-
-// Source describes the location of a line of source code.
-type Source struct {
- // Function is the package path-qualified function name containing the
- // source line. If non-empty, this string uniquely identifies a single
- // function in the program. This may be the empty string if not known.
- Function string `json:"function"`
- // File and Line are the file name and line number (1-based) of the source
- // line. These may be the empty string and zero, respectively, if not known.
- File string `json:"file"`
- Line int `json:"line"`
-}
-
-// group returns the non-zero fields of s as a group Value.
-// It is similar to a LogValue method, but we don't want Source
-// to implement LogValuer because it would be resolved before
-// the ReplaceAttr function was called.
-func (s *Source) group() Value {
- var as []Attr
- if s.Function != "" {
- as = append(as, String("function", s.Function))
- }
- if s.File != "" {
- as = append(as, String("file", s.File))
- }
- if s.Line != 0 {
- as = append(as, Int("line", s.Line))
- }
- return GroupValue(as...)
-}
-
-// source returns a Source for the log event.
-// If the Record was created without the necessary information,
-// or if the location is unavailable, it returns a non-nil *Source
-// with zero fields.
-func (r Record) source() *Source {
- fs := runtime.CallersFrames([]uintptr{r.PC})
- f, _ := fs.Next()
- return &Source{
- Function: f.Function,
- File: f.File,
- Line: f.Line,
- }
-}
diff --git a/tools/vendor/golang.org/x/exp/slog/text_handler.go b/tools/vendor/golang.org/x/exp/slog/text_handler.go
deleted file mode 100644
index 75b66b716..000000000
--- a/tools/vendor/golang.org/x/exp/slog/text_handler.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "context"
- "encoding"
- "fmt"
- "io"
- "reflect"
- "strconv"
- "unicode"
- "unicode/utf8"
-)
-
-// TextHandler is a Handler that writes Records to an io.Writer as a
-// sequence of key=value pairs separated by spaces and followed by a newline.
-type TextHandler struct {
- *commonHandler
-}
-
-// NewTextHandler creates a TextHandler that writes to w,
-// using the given options.
-// If opts is nil, the default options are used.
-func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler {
- if opts == nil {
- opts = &HandlerOptions{}
- }
- return &TextHandler{
- &commonHandler{
- json: false,
- w: w,
- opts: *opts,
- },
- }
-}
-
-// Enabled reports whether the handler handles records at the given level.
-// The handler ignores records whose level is lower.
-func (h *TextHandler) Enabled(_ context.Context, level Level) bool {
- return h.commonHandler.enabled(level)
-}
-
-// WithAttrs returns a new TextHandler whose attributes consist
-// of h's attributes followed by attrs.
-func (h *TextHandler) WithAttrs(attrs []Attr) Handler {
- return &TextHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
-}
-
-func (h *TextHandler) WithGroup(name string) Handler {
- return &TextHandler{commonHandler: h.commonHandler.withGroup(name)}
-}
-
-// Handle formats its argument Record as a single line of space-separated
-// key=value items.
-//
-// If the Record's time is zero, the time is omitted.
-// Otherwise, the key is "time"
-// and the value is output in RFC3339 format with millisecond precision.
-//
-// If the Record's level is zero, the level is omitted.
-// Otherwise, the key is "level"
-// and the value of [Level.String] is output.
-//
-// If the AddSource option is set and source information is available,
-// the key is "source" and the value is output as FILE:LINE.
-//
-// The message's key is "msg".
-//
-// To modify these or other attributes, or remove them from the output, use
-// [HandlerOptions.ReplaceAttr].
-//
-// If a value implements [encoding.TextMarshaler], the result of MarshalText is
-// written. Otherwise, the result of fmt.Sprint is written.
-//
-// Keys and values are quoted with [strconv.Quote] if they contain Unicode space
-// characters, non-printing characters, '"' or '='.
-//
-// Keys inside groups consist of components (keys or group names) separated by
-// dots. No further escaping is performed.
-// Thus there is no way to determine from the key "a.b.c" whether there
-// are two groups "a" and "b" and a key "c", or a single group "a.b" and a key "c",
-// or single group "a" and a key "b.c".
-// If it is necessary to reconstruct the group structure of a key
-// even in the presence of dots inside components, use
-// [HandlerOptions.ReplaceAttr] to encode that information in the key.
-//
-// Each call to Handle results in a single serialized call to
-// io.Writer.Write.
-func (h *TextHandler) Handle(_ context.Context, r Record) error {
- return h.commonHandler.handle(r)
-}
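-
-// A sketch (not in the original file) of the key rules above: group names
-// are joined to keys with dots, and keys containing spaces are quoted.
-// Assumes "os" is imported.
-func ExampleTextHandler_WithGroup() {
- logger := New(NewTextHandler(os.Stderr, nil)).WithGroup("req")
- logger.Info("done", "remote addr", "10.0.0.1", "status", 200)
- // ... msg=done "req.remote addr"=10.0.0.1 req.status=200
-}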
-
-func appendTextValue(s *handleState, v Value) error {
- switch v.Kind() {
- case KindString:
- s.appendString(v.str())
- case KindTime:
- s.appendTime(v.time())
- case KindAny:
- if tm, ok := v.any.(encoding.TextMarshaler); ok {
- data, err := tm.MarshalText()
- if err != nil {
- return err
- }
- // TODO: avoid the conversion to string.
- s.appendString(string(data))
- return nil
- }
- if bs, ok := byteSlice(v.any); ok {
- // As of Go 1.19, this only allocates for strings longer than 32 bytes.
- s.buf.WriteString(strconv.Quote(string(bs)))
- return nil
- }
- s.appendString(fmt.Sprintf("%+v", v.Any()))
- default:
- *s.buf = v.append(*s.buf)
- }
- return nil
-}
-
-// byteSlice returns its argument as a []byte if the argument's
-// underlying type is []byte, along with a second return value of true.
-// Otherwise it returns nil, false.
-func byteSlice(a any) ([]byte, bool) {
- if bs, ok := a.([]byte); ok {
- return bs, true
- }
- // Like Printf's %s, we allow both the slice type and the byte element type to be named.
- t := reflect.TypeOf(a)
- if t != nil && t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
- return reflect.ValueOf(a).Bytes(), true
- }
- return nil, false
-}
-
-func needsQuoting(s string) bool {
- if len(s) == 0 {
- return true
- }
- for i := 0; i < len(s); {
- b := s[i]
- if b < utf8.RuneSelf {
- // Quote anything except a backslash that would need quoting in a
- // JSON string, as well as space and '='
- if b != '\\' && (b == ' ' || b == '=' || !safeSet[b]) {
- return true
- }
- i++
- continue
- }
- r, size := utf8.DecodeRuneInString(s[i:])
- if r == utf8.RuneError || unicode.IsSpace(r) || !unicode.IsPrint(r) {
- return true
- }
- i += size
- }
- return false
-}
diff --git a/tools/vendor/golang.org/x/exp/slog/value.go b/tools/vendor/golang.org/x/exp/slog/value.go
deleted file mode 100644
index 3550c46fc..000000000
--- a/tools/vendor/golang.org/x/exp/slog/value.go
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "fmt"
- "math"
- "runtime"
- "strconv"
- "strings"
- "time"
- "unsafe"
-
- "golang.org/x/exp/slices"
-)
-
-// A Value can represent any Go value, but unlike type any,
-// it can represent most small values without an allocation.
-// The zero Value corresponds to nil.
-type Value struct {
- _ [0]func() // disallow ==
- // num holds the value for Kinds Int64, Uint64, Float64, Bool and Duration,
- // the string length for KindString, and nanoseconds since the epoch for KindTime.
- num uint64
- // If any is of type Kind, then the value is in num as described above.
- // If any is of type *time.Location, then the Kind is Time and time.Time value
- // can be constructed from the Unix nanos in num and the location (monotonic time
- // is not preserved).
- // If any is of type stringptr, then the Kind is String and the string value
- // consists of the length in num and the pointer in any.
- // Otherwise, the Kind is Any and any is the value.
- // (This implies that Attrs cannot store values of type Kind, *time.Location
- // or stringptr.)
- any any
-}
-
-// Kind is the kind of a Value.
-type Kind int
-
-// The following list is sorted alphabetically except for later additions
-// appended at the end; it is also important that KindAny is 0 so that a
-// zero Value represents nil.
-
-const (
- KindAny Kind = iota
- KindBool
- KindDuration
- KindFloat64
- KindInt64
- KindString
- KindTime
- KindUint64
- KindGroup
- KindLogValuer
-)
-
-var kindStrings = []string{
- "Any",
- "Bool",
- "Duration",
- "Float64",
- "Int64",
- "String",
- "Time",
- "Uint64",
- "Group",
- "LogValuer",
-}
-
-func (k Kind) String() string {
- if k >= 0 && int(k) < len(kindStrings) {
- return kindStrings[k]
- }
- return ""
-}
-
-// Unexported version of Kind, just so we can store Kinds in Values.
-// (No user-provided value has this type.)
-type kind Kind
-
-// Kind returns v's Kind.
-func (v Value) Kind() Kind {
- switch x := v.any.(type) {
- case Kind:
- return x
- case stringptr:
- return KindString
- case timeLocation:
- return KindTime
- case groupptr:
- return KindGroup
- case LogValuer:
- return KindLogValuer
- case kind: // a kind is just a wrapper for a Kind
- return KindAny
- default:
- return KindAny
- }
-}
-
-//////////////// Constructors
-
-// IntValue returns a Value for an int.
-func IntValue(v int) Value {
- return Int64Value(int64(v))
-}
-
-// Int64Value returns a Value for an int64.
-func Int64Value(v int64) Value {
- return Value{num: uint64(v), any: KindInt64}
-}
-
-// Uint64Value returns a Value for a uint64.
-func Uint64Value(v uint64) Value {
- return Value{num: v, any: KindUint64}
-}
-
-// Float64Value returns a Value for a floating-point number.
-func Float64Value(v float64) Value {
- return Value{num: math.Float64bits(v), any: KindFloat64}
-}
-
-// BoolValue returns a Value for a bool.
-func BoolValue(v bool) Value {
- u := uint64(0)
- if v {
- u = 1
- }
- return Value{num: u, any: KindBool}
-}
-
-// Unexported version of *time.Location, just so we can store *time.Locations in
-// Values. (No user-provided value has this type.)
-type timeLocation *time.Location
-
-// TimeValue returns a Value for a time.Time.
-// It discards the monotonic portion.
-func TimeValue(v time.Time) Value {
- if v.IsZero() {
- // UnixNano on the zero time is undefined, so represent the zero time
- // with a nil *time.Location instead. time.Time.Location method never
- // returns nil, so a Value with any == timeLocation(nil) cannot be
- // mistaken for any other Value, time.Time or otherwise.
- return Value{any: timeLocation(nil)}
- }
- return Value{num: uint64(v.UnixNano()), any: timeLocation(v.Location())}
-}
-
-// DurationValue returns a Value for a time.Duration.
-func DurationValue(v time.Duration) Value {
- return Value{num: uint64(v.Nanoseconds()), any: KindDuration}
-}
-
-// AnyValue returns a Value for the supplied value.
-//
-// If the supplied value is of type Value, it is returned
-// unmodified.
-//
-// Given a value of one of Go's predeclared string, bool, or
-// (non-complex) numeric types, AnyValue returns a Value of kind
-// String, Bool, Uint64, Int64, or Float64. The width of the
-// original numeric type is not preserved.
-//
-// Given a time.Time or time.Duration value, AnyValue returns a Value of kind
-// KindTime or KindDuration. The monotonic time is not preserved.
-//
-// For nil, or values of all other types, including named types whose
-// underlying type is numeric, AnyValue returns a value of kind KindAny.
-func AnyValue(v any) Value {
- switch v := v.(type) {
- case string:
- return StringValue(v)
- case int:
- return Int64Value(int64(v))
- case uint:
- return Uint64Value(uint64(v))
- case int64:
- return Int64Value(v)
- case uint64:
- return Uint64Value(v)
- case bool:
- return BoolValue(v)
- case time.Duration:
- return DurationValue(v)
- case time.Time:
- return TimeValue(v)
- case uint8:
- return Uint64Value(uint64(v))
- case uint16:
- return Uint64Value(uint64(v))
- case uint32:
- return Uint64Value(uint64(v))
- case uintptr:
- return Uint64Value(uint64(v))
- case int8:
- return Int64Value(int64(v))
- case int16:
- return Int64Value(int64(v))
- case int32:
- return Int64Value(int64(v))
- case float64:
- return Float64Value(v)
- case float32:
- return Float64Value(float64(v))
- case []Attr:
- return GroupValue(v...)
- case Kind:
- return Value{any: kind(v)}
- case Value:
- return v
- default:
- return Value{any: v}
- }
-}
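-
-// A sketch (not in the original file) of the width erasure described above:
-//
-// AnyValue(int8(7)).Kind() // KindInt64
-// AnyValue(uint16(7)).Kind() // KindUint64
-// type myInt int
-// AnyValue(myInt(7)).Kind() // KindAny: named numeric types fall through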
-
-//////////////// Accessors
-
-// Any returns v's value as an any.
-func (v Value) Any() any {
- switch v.Kind() {
- case KindAny:
- if k, ok := v.any.(kind); ok {
- return Kind(k)
- }
- return v.any
- case KindLogValuer:
- return v.any
- case KindGroup:
- return v.group()
- case KindInt64:
- return int64(v.num)
- case KindUint64:
- return v.num
- case KindFloat64:
- return v.float()
- case KindString:
- return v.str()
- case KindBool:
- return v.bool()
- case KindDuration:
- return v.duration()
- case KindTime:
- return v.time()
- default:
- panic(fmt.Sprintf("bad kind: %s", v.Kind()))
- }
-}
-
-// Int64 returns v's value as an int64. It panics
-// if v is not a signed integer.
-func (v Value) Int64() int64 {
- if g, w := v.Kind(), KindInt64; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
- return int64(v.num)
-}
-
-// Uint64 returns v's value as a uint64. It panics
-// if v is not an unsigned integer.
-func (v Value) Uint64() uint64 {
- if g, w := v.Kind(), KindUint64; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
- return v.num
-}
-
-// Bool returns v's value as a bool. It panics
-// if v is not a bool.
-func (v Value) Bool() bool {
- if g, w := v.Kind(), KindBool; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
- return v.bool()
-}
-
-func (v Value) bool() bool {
- return v.num == 1
-}
-
-// Duration returns v's value as a time.Duration. It panics
-// if v is not a time.Duration.
-func (v Value) Duration() time.Duration {
- if g, w := v.Kind(), KindDuration; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
-
- return v.duration()
-}
-
-func (v Value) duration() time.Duration {
- return time.Duration(int64(v.num))
-}
-
-// Float64 returns v's value as a float64. It panics
-// if v is not a float64.
-func (v Value) Float64() float64 {
- if g, w := v.Kind(), KindFloat64; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
-
- return v.float()
-}
-
-func (v Value) float() float64 {
- return math.Float64frombits(v.num)
-}
-
-// Time returns v's value as a time.Time. It panics
-// if v is not a time.Time.
-func (v Value) Time() time.Time {
- if g, w := v.Kind(), KindTime; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
- return v.time()
-}
-
-func (v Value) time() time.Time {
- loc := v.any.(timeLocation)
- if loc == nil {
- return time.Time{}
- }
- return time.Unix(0, int64(v.num)).In(loc)
-}
-
-// LogValuer returns v's value as a LogValuer. It panics
-// if v is not a LogValuer.
-func (v Value) LogValuer() LogValuer {
- return v.any.(LogValuer)
-}
-
-// Group returns v's value as a []Attr.
-// It panics if v's Kind is not KindGroup.
-func (v Value) Group() []Attr {
- if sp, ok := v.any.(groupptr); ok {
- return unsafe.Slice((*Attr)(sp), v.num)
- }
- panic("Group: bad kind")
-}
-
-func (v Value) group() []Attr {
- return unsafe.Slice((*Attr)(v.any.(groupptr)), v.num)
-}
-
-//////////////// Other
-
-// Equal reports whether v and w represent the same Go value.
-func (v Value) Equal(w Value) bool {
- k1 := v.Kind()
- k2 := w.Kind()
- if k1 != k2 {
- return false
- }
- switch k1 {
- case KindInt64, KindUint64, KindBool, KindDuration:
- return v.num == w.num
- case KindString:
- return v.str() == w.str()
- case KindFloat64:
- return v.float() == w.float()
- case KindTime:
- return v.time().Equal(w.time())
- case KindAny, KindLogValuer:
- return v.any == w.any // may panic if non-comparable
- case KindGroup:
- return slices.EqualFunc(v.group(), w.group(), Attr.Equal)
- default:
- panic(fmt.Sprintf("bad kind: %s", k1))
- }
-}
-
-// append appends a text representation of v to dst.
-// v is formatted as with fmt.Sprint.
-func (v Value) append(dst []byte) []byte {
- switch v.Kind() {
- case KindString:
- return append(dst, v.str()...)
- case KindInt64:
- return strconv.AppendInt(dst, int64(v.num), 10)
- case KindUint64:
- return strconv.AppendUint(dst, v.num, 10)
- case KindFloat64:
- return strconv.AppendFloat(dst, v.float(), 'g', -1, 64)
- case KindBool:
- return strconv.AppendBool(dst, v.bool())
- case KindDuration:
- return append(dst, v.duration().String()...)
- case KindTime:
- return append(dst, v.time().String()...)
- case KindGroup:
- return fmt.Append(dst, v.group())
- case KindAny, KindLogValuer:
- return fmt.Append(dst, v.any)
- default:
- panic(fmt.Sprintf("bad kind: %s", v.Kind()))
- }
-}
-
-// A LogValuer is any Go value that can convert itself into a Value for logging.
-//
-// This mechanism may be used to defer expensive operations until they are
-// needed, or to expand a single value into a sequence of components.
-type LogValuer interface {
- LogValue() Value
-}
-
-const maxLogValues = 100
-
-// Resolve repeatedly calls LogValue on v while it implements LogValuer,
-// and returns the result.
-// If v resolves to a group, the group's attributes' values are not recursively
-// resolved.
-// If the number of LogValue calls exceeds a threshold, a Value containing an
-// error is returned.
-// Resolve's return value is guaranteed not to be of Kind KindLogValuer.
-func (v Value) Resolve() (rv Value) {
- orig := v
- defer func() {
- if r := recover(); r != nil {
- rv = AnyValue(fmt.Errorf("LogValue panicked\n%s", stack(3, 5)))
- }
- }()
-
- for i := 0; i < maxLogValues; i++ {
- if v.Kind() != KindLogValuer {
- return v
- }
- v = v.LogValuer().LogValue()
- }
- err := fmt.Errorf("LogValue called too many times on Value of type %T", orig.Any())
- return AnyValue(err)
-}
-
-func stack(skip, nFrames int) string {
- pcs := make([]uintptr, nFrames+1)
- n := runtime.Callers(skip+1, pcs)
- if n == 0 {
- return "(no stack)"
- }
- frames := runtime.CallersFrames(pcs[:n])
- var b strings.Builder
- i := 0
- for {
- frame, more := frames.Next()
- fmt.Fprintf(&b, "called from %s (%s:%d)\n", frame.Function, frame.File, frame.Line)
- if !more {
- break
- }
- i++
- if i >= nFrames {
- fmt.Fprintf(&b, "(rest of stack elided)\n")
- break
- }
- }
- return b.String()
-}
diff --git a/tools/vendor/golang.org/x/exp/slog/value_119.go b/tools/vendor/golang.org/x/exp/slog/value_119.go
deleted file mode 100644
index 29b0d7329..000000000
--- a/tools/vendor/golang.org/x/exp/slog/value_119.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.19 && !go1.20
-
-package slog
-
-import (
- "reflect"
- "unsafe"
-)
-
-type (
- stringptr unsafe.Pointer // used in Value.any when the Value is a string
- groupptr unsafe.Pointer // used in Value.any when the Value is a []Attr
-)
-
-// StringValue returns a new Value for a string.
-func StringValue(value string) Value {
- hdr := (*reflect.StringHeader)(unsafe.Pointer(&value))
- return Value{num: uint64(hdr.Len), any: stringptr(hdr.Data)}
-}
-
-func (v Value) str() string {
- var s string
- hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
- hdr.Data = uintptr(v.any.(stringptr))
- hdr.Len = int(v.num)
- return s
-}
-
-// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
-// the methods Int64, Float64, and so on, which panic if v is of the
-// wrong kind, String never panics.
-func (v Value) String() string {
- if sp, ok := v.any.(stringptr); ok {
- // Inlining this code makes a huge difference.
- var s string
- hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
- hdr.Data = uintptr(sp)
- hdr.Len = int(v.num)
- return s
- }
- return string(v.append(nil))
-}
-
-// GroupValue returns a new Value for a list of Attrs.
-// The caller must not subsequently mutate the argument slice.
-func GroupValue(as ...Attr) Value {
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&as))
- return Value{num: uint64(hdr.Len), any: groupptr(hdr.Data)}
-}
diff --git a/tools/vendor/golang.org/x/exp/slog/value_120.go b/tools/vendor/golang.org/x/exp/slog/value_120.go
deleted file mode 100644
index f7d4c0932..000000000
--- a/tools/vendor/golang.org/x/exp/slog/value_120.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-
-package slog
-
-import "unsafe"
-
-type (
- stringptr *byte // used in Value.any when the Value is a string
- groupptr *Attr // used in Value.any when the Value is a []Attr
-)
-
-// StringValue returns a new Value for a string.
-func StringValue(value string) Value {
- return Value{num: uint64(len(value)), any: stringptr(unsafe.StringData(value))}
-}
-
-// GroupValue returns a new Value for a list of Attrs.
-// The caller must not subsequently mutate the argument slice.
-func GroupValue(as ...Attr) Value {
- return Value{num: uint64(len(as)), any: groupptr(unsafe.SliceData(as))}
-}
-
-// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
-// the methods Int64, Float64, and so on, which panic if v is of the
-// wrong kind, String never panics.
-func (v Value) String() string {
- if sp, ok := v.any.(stringptr); ok {
- return unsafe.String(sp, v.num)
- }
- return string(v.append(nil))
-}
-
-func (v Value) str() string {
- return unsafe.String(v.any.(stringptr), v.num)
-}
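
StringValue and str in this Go 1.20+ variant pack a string into a Value without copying, via the unsafe.StringData/unsafe.String pair. A standalone sketch of the same round trip, assuming only the standard unsafe package:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := "hello"
	// Decompose the string into its data pointer and length, as
	// StringValue does when packing it into a Value...
	data := unsafe.StringData(s)
	// ...and reassemble it with unsafe.String, as str() does. No bytes
	// are copied; both headers alias the same immutable backing array.
	t := unsafe.String(data, len(s))
	fmt.Println(t == s, len(t)) // true 5
}
```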
diff --git a/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
index 58934f766..5c8dbbb7a 100644
--- a/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
+++ b/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
@@ -183,7 +183,7 @@ type application struct {
func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
// convert typed nil into untyped nil
- if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
+ if v := reflect.ValueOf(n); v.Kind() == reflect.Pointer && v.IsNil() {
n = nil
}
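
The guard this hunk touches exists because an interface holding a typed nil pointer is not itself nil. A minimal sketch of the pitfall and the reflect-based normalization:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/reflect" // placeholder comment removed below; see import block
)
```

```go
package main

import (
	"fmt"
	"go/ast"
	"reflect"
)

func main() {
	var id *ast.Ident // typed nil pointer
	var n ast.Node = id

	// The interface is non-nil: it still carries the type *ast.Ident.
	fmt.Println(n == nil) // false

	// The normalization used by apply detects and strips the typed nil.
	if v := reflect.ValueOf(n); v.Kind() == reflect.Pointer && v.IsNil() {
		n = nil
	}
	fmt.Println(n == nil) // true
}
```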
diff --git a/tools/vendor/golang.org/x/tools/go/ast/astutil/util.go b/tools/vendor/golang.org/x/tools/go/ast/astutil/util.go
index ca71e3e10..c820b2084 100644
--- a/tools/vendor/golang.org/x/tools/go/ast/astutil/util.go
+++ b/tools/vendor/golang.org/x/tools/go/ast/astutil/util.go
@@ -8,4 +8,6 @@ import "go/ast"
// Unparen returns e with any enclosing parentheses stripped.
// Deprecated: use [ast.Unparen].
+//
+//go:fix inline
func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) }
diff --git a/tools/vendor/golang.org/x/tools/go/packages/packages.go b/tools/vendor/golang.org/x/tools/go/packages/packages.go
index c3a59b8eb..6665a04c1 100644
--- a/tools/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/tools/vendor/golang.org/x/tools/go/packages/packages.go
@@ -141,6 +141,8 @@ const (
LoadAllSyntax = LoadSyntax | NeedDeps
// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
+ //
+ //go:fix inline
NeedExportsFile = NeedExportFile
)
@@ -161,7 +163,7 @@ type Config struct {
// If the user provides a logger, debug logging is enabled.
// If the GOPACKAGESDEBUG environment variable is set to true,
// but the logger is nil, default to log.Printf.
- Logf func(format string, args ...interface{})
+ Logf func(format string, args ...any)
// Dir is the directory in which to run the build system's query tool
// that provides information about the packages.
@@ -564,13 +566,13 @@ type ModuleError struct {
}
func init() {
- packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError {
+ packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError {
return p.(*Package).depsErrors
}
- packagesinternal.SetModFile = func(config interface{}, value string) {
+ packagesinternal.SetModFile = func(config any, value string) {
config.(*Config).modFile = value
}
- packagesinternal.SetModFlag = func(config interface{}, value string) {
+ packagesinternal.SetModFlag = func(config any, value string) {
config.(*Config).modFlag = value
}
packagesinternal.TypecheckCgo = int(typecheckCgo)
@@ -739,7 +741,7 @@ func newLoader(cfg *Config) *loader {
if debug {
ld.Config.Logf = log.Printf
} else {
- ld.Config.Logf = func(format string, args ...interface{}) {}
+ ld.Config.Logf = func(format string, args ...any) {}
}
}
if ld.Config.Mode == 0 {
diff --git a/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go b/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go
index 43261147c..b6d542c64 100644
--- a/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go
+++ b/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -389,8 +389,13 @@ func (hasher) hashTypeName(tname *types.TypeName) uint32 {
// path, and whether or not it is a package-level typename. It
// is rare for a package to define multiple local types with
// the same name.)
- hash := uintptr(unsafe.Pointer(tname))
- return uint32(hash ^ (hash >> 32))
+ ptr := uintptr(unsafe.Pointer(tname))
+ if unsafe.Sizeof(ptr) == 8 {
+ hash := uint64(ptr)
+ return uint32(hash ^ (hash >> 32))
+ } else {
+ return uint32(ptr)
+ }
}
// shallowHash computes a hash of t without looking at any of its
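
The replacement makes the word-size handling explicit: on 64-bit targets the high half of the pointer is XOR-folded into the low half, while on 32-bit targets the pointer already fits in a uint32 (where the old `hash >> 32` was simply zero). A sketch of the same fold in isolation (the function name is invented for illustration):

```go
package main

import "fmt"

// fold32 mixes the high and low halves of a 64-bit value into 32 bits,
// the same XOR-fold hashTypeName applies to 64-bit pointers.
func fold32(x uint64) uint32 {
	return uint32(x ^ (x >> 32))
}

func main() {
	fmt.Printf("%#x\n", fold32(0xdeadbeef00c0ffee)) // 0xde6d4101
}
```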
diff --git a/tools/vendor/golang.org/x/tools/internal/event/keys/keys.go b/tools/vendor/golang.org/x/tools/internal/event/keys/keys.go
index a02206e30..4cfa51b61 100644
--- a/tools/vendor/golang.org/x/tools/internal/event/keys/keys.go
+++ b/tools/vendor/golang.org/x/tools/internal/event/keys/keys.go
@@ -32,7 +32,7 @@ func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
}
// Get can be used to get a label for the key from a label.Map.
-func (k *Value) Get(lm label.Map) interface{} {
+func (k *Value) Get(lm label.Map) any {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
@@ -40,10 +40,10 @@ func (k *Value) Get(lm label.Map) interface{} {
}
// From can be used to get a value from a Label.
-func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() }
+func (k *Value) From(t label.Label) any { return t.UnpackValue() }
// Of creates a new Label with this key and the supplied value.
-func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) }
+func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) }
// Tag represents a key for tagging labels that have no value.
// These are used when the existence of the label is the entire information it
diff --git a/tools/vendor/golang.org/x/tools/internal/event/label/label.go b/tools/vendor/golang.org/x/tools/internal/event/label/label.go
index 0f526e1f9..7c00ca2a6 100644
--- a/tools/vendor/golang.org/x/tools/internal/event/label/label.go
+++ b/tools/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -32,7 +32,7 @@ type Key interface {
type Label struct {
key Key
packed uint64
- untyped interface{}
+ untyped any
}
// Map is the interface to a collection of Labels indexed by key.
@@ -76,13 +76,13 @@ type mapChain struct {
// OfValue creates a new label from the key and value.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
-func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
+func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} }
// UnpackValue assumes the label was built using LabelOfValue and returns the value
// that was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
-func (t Label) UnpackValue() interface{} { return t.untyped }
+func (t Label) UnpackValue() any { return t.untyped }
// Of64 creates a new label from a key and a uint64. This is often
// used for non uint64 values that can be packed into a uint64.
diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
index d79a605ed..734c46198 100644
--- a/tools/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
+++ b/tools/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -14,7 +14,7 @@ import (
"sync"
)
-func errorf(format string, args ...interface{}) {
+func errorf(format string, args ...any) {
panic(fmt.Sprintf(format, args...))
}
diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index 7dfc31a37..253d6493c 100644
--- a/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -310,7 +310,7 @@ func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byt
}
// ReportFunc is the type of a function used to report formatted bugs.
-type ReportFunc = func(string, ...interface{})
+type ReportFunc = func(string, ...any)
// Current bundled export format version. Increase with each format change.
// 0: initial implementation
@@ -597,7 +597,7 @@ type filePositions struct {
needed []uint64 // unordered list of needed file offsets
}
-func (p *iexporter) trace(format string, args ...interface{}) {
+func (p *iexporter) trace(format string, args ...any) {
if !trace {
// Call sites should also be guarded, but having this check here allows
// easily enabling/disabling debug trace statements.
@@ -1583,6 +1583,6 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) }
// "internalErrorf" as the former is used for bugs, whose cause is
// internal inconsistency, whereas the latter is used for ordinary
// situations like bad input, whose cause is external.
-func internalErrorf(format string, args ...interface{}) error {
+func internalErrorf(format string, args ...any) error {
return internalError(fmt.Sprintf(format, args...))
}
diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 129439271..bc6c9741e 100644
--- a/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -400,7 +400,7 @@ type iimporter struct {
indent int // for tracing support
}
-func (p *iimporter) trace(format string, args ...interface{}) {
+func (p *iimporter) trace(format string, args ...any) {
if !trace {
// Call sites should also be guarded, but having this check here allows
// easily enabling/disabling debug trace statements.
diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index 522287d18..37b4a39e9 100644
--- a/tools/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/tools/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -574,7 +574,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
typesinternal.SetVarKind(recv, typesinternal.RecvVar)
- methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
+ methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic()))
}
embeds := make([]types.Type, iface.NumEmbeddeds())
diff --git a/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
index 836151551..984b79c2a 100644
--- a/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
+++ b/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
@@ -22,7 +22,7 @@ import (
// Options controls the behavior of a Walk call.
type Options struct {
// If Logf is non-nil, debug logging is enabled through this function.
- Logf func(format string, args ...interface{})
+ Logf func(format string, args ...any)
// Search module caches. Also disables legacy goimports ignore rules.
ModulesEnabled bool
@@ -81,7 +81,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root
// walkDir creates a walker and starts fastwalk with this walker.
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
if opts.Logf == nil {
- opts.Logf = func(format string, args ...interface{}) {}
+ opts.Logf = func(format string, args ...any) {}
}
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
opts.Logf("skipping nonexistent directory: %v", root.Path)
diff --git a/tools/vendor/golang.org/x/tools/internal/imports/fix.go b/tools/vendor/golang.org/x/tools/internal/imports/fix.go
index bf6b0aadd..737a9bfae 100644
--- a/tools/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/tools/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -559,7 +559,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P
return err
}
apply(fset, f, fixes)
- return err
+ return nil
}
// getFixes gets the import fixes that need to be made to f in order to fix the imports.
@@ -1030,7 +1030,7 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) {
//
// For gopls, we can optionally explicitly choose a resolver type, since we
// already know the view type.
- if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
+ if e.Env["GOMOD"] == "" && (e.Env["GOWORK"] == "" || e.Env["GOWORK"] == "off") {
e.resolver = newGopathResolver(e)
e.logf("created gopath resolver")
} else if r, err := newModuleResolver(e, e.ModCache); err != nil {
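
The updated condition treats both an unset GOWORK and GOWORK=off as "no workspace", falling back to the GOPATH resolver only when there is also no enclosing module. A standalone sketch of the decision, with the helper name invented for illustration:

```go
package main

import "fmt"

// useGopathResolver mirrors the updated condition in GetResolver: the
// GOPATH resolver applies only with no module (GOMOD empty) and no
// active workspace (GOWORK empty or explicitly "off").
func useGopathResolver(gomod, gowork string) bool {
	return gomod == "" && (gowork == "" || gowork == "off")
}

func main() {
	fmt.Println(useGopathResolver("", ""))           // true
	fmt.Println(useGopathResolver("", "off"))        // true: GOWORK=off disables workspace mode
	fmt.Println(useGopathResolver("", "/w/go.work")) // false
	fmt.Println(useGopathResolver("/m/go.mod", ""))  // false
}
```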
diff --git a/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 784605914..25ebab663 100644
--- a/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -17,4 +17,4 @@ var TypecheckCgo int
var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
var SetModFlag = func(config any, value string) {}
-var SetModFile = func(config interface{}, value string) {}
+var SetModFile = func(config any, value string) {}
diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/deps.go b/tools/vendor/golang.org/x/tools/internal/stdlib/deps.go
new file mode 100644
index 000000000..7cca431cd
--- /dev/null
+++ b/tools/vendor/golang.org/x/tools/internal/stdlib/deps.go
@@ -0,0 +1,359 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package stdlib
+
+type pkginfo struct {
+ name string
+ deps string // list of indices of dependencies, as varint-encoded deltas
+}
+
+var deps = [...]pkginfo{
+ {"archive/tar", "\x03k\x03E5\x01\v\x01#\x01\x01\x02\x05\t\x02\x01\x02\x02\v"},
+ {"archive/zip", "\x02\x04a\a\x16\x0205\x01+\x05\x01\x10\x03\x02\r\x04"},
+ {"bufio", "\x03k}E\x13"},
+ {"bytes", "n+R\x03\fG\x02\x02"},
+ {"cmp", ""},
+ {"compress/bzip2", "\x02\x02\xe7\x01B"},
+ {"compress/flate", "\x02l\x03z\r\x024\x01\x03"},
+ {"compress/gzip", "\x02\x04a\a\x03\x15eT"},
+ {"compress/lzw", "\x02l\x03z"},
+ {"compress/zlib", "\x02\x04a\a\x03\x13\x01f"},
+ {"container/heap", "\xae\x02"},
+ {"container/list", ""},
+ {"container/ring", ""},
+ {"context", "n\\h\x01\f"},
+ {"crypto", "\x84\x01gD"},
+ {"crypto/aes", "\x10\n\a\x8e\x02"},
+ {"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1d,Q"},
+ {"crypto/des", "\x10\x13\x1d.,\x95\x01\x03"},
+ {"crypto/dsa", "@\x04*}\x0e"},
+ {"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1d}"},
+ {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1d}\x0e\x04K\x01"},
+ {"crypto/ed25519", "\x0e\x1c\x16\n\a\x1d}D"},
+ {"crypto/elliptic", "0>}\x0e9"},
+ {"crypto/fips140", " \x05\x91\x01"},
+ {"crypto/hkdf", "-\x12\x01.\x16"},
+ {"crypto/hmac", "\x1a\x14\x11\x01\x113"},
+ {"crypto/internal/boring", "\x0e\x02\rg"},
+ {"crypto/internal/boring/bbig", "\x1a\xdf\x01L"},
+ {"crypto/internal/boring/bcache", "\xb3\x02\x12"},
+ {"crypto/internal/boring/sig", ""},
+ {"crypto/internal/cryptotest", "\x03\r\n)\x0e\x1a\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\f\x05\n"},
+ {"crypto/internal/entropy", "E"},
+ {"crypto/internal/fips140", ">0}9\f\x15"},
+ {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05+\x8c\x015"},
+ {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06+\x8a\x01"},
+ {"crypto/internal/fips140/alias", "\xc5\x02"},
+ {"crypto/internal/fips140/bigmod", "%\x17\x01\x06+\x8c\x01"},
+ {"crypto/internal/fips140/check", " \x0e\x06\b\x02\xad\x01Z"},
+ {"crypto/internal/fips140/check/checktest", "%\xff\x01!"},
+ {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01)}\x0f8"},
+ {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f2}\x0f8"},
+ {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068}G"},
+ {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc1\x01\x03"},
+ {"crypto/internal/fips140/edwards25519", "%\a\f\x042\x8c\x018"},
+ {"crypto/internal/fips140/edwards25519/field", "%\x13\x042\x8c\x01"},
+ {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:"},
+ {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018"},
+ {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x042"},
+ {"crypto/internal/fips140/nistec", "%\f\a\x042\x8c\x01*\x0e\x13"},
+ {"crypto/internal/fips140/nistec/fiat", "%\x136\x8c\x01"},
+ {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:"},
+ {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026}G"},
+ {"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06+\x8c\x01"},
+ {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x011\x8c\x01K"},
+ {"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06+\x8c\x01"},
+ {"crypto/internal/fips140/ssh", " \x05"},
+ {"crypto/internal/fips140/subtle", "#\x19\xbe\x01"},
+ {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028"},
+ {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b2"},
+ {"crypto/internal/fips140deps", ""},
+ {"crypto/internal/fips140deps/byteorder", "\x9a\x01"},
+ {"crypto/internal/fips140deps/cpu", "\xae\x01\a"},
+ {"crypto/internal/fips140deps/godebug", "\xb6\x01"},
+ {"crypto/internal/fips140hash", "5\x1a5\xc1\x01"},
+ {"crypto/internal/fips140only", "'\r\x01\x01N25"},
+ {"crypto/internal/fips140test", ""},
+ {"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d$,`M"},
+ {"crypto/internal/impl", "\xb0\x02"},
+ {"crypto/internal/randutil", "\xeb\x01\x12"},
+ {"crypto/internal/sysrand", "\xd7\x01@\x1b\x01\f\x06"},
+ {"crypto/internal/sysrand/internal/seccomp", "n"},
+ {"crypto/md5", "\x0e2.\x16\x16`"},
+ {"crypto/mlkem", "/"},
+ {"crypto/pbkdf2", "2\r\x01.\x16"},
+ {"crypto/rand", "\x1a\x06\a\x19\x04\x01)}\x0eL"},
+ {"crypto/rc4", "#\x1d.\xc1\x01"},
+ {"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1d\x03\x1325\r\x01"},
+ {"crypto/sha1", "\x0e\f&.\x16\x16\x14L"},
+ {"crypto/sha256", "\x0e\f\x1aP"},
+ {"crypto/sha3", "\x0e'O\xc1\x01"},
+ {"crypto/sha512", "\x0e\f\x1cN"},
+ {"crypto/subtle", "8\x98\x01T"},
+ {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x18\x02\x03\x13\x16\x14\b5\x16\x16\r\t\x01\x01\x01\x02\x01\f\x06\x02\x01"},
+ {"crypto/tls/internal/fips140tls", " \x93\x02"},
+ {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x01\x0e\x06\x02\x02\x03E5\x03\t\x01\x01\x01\a\x10\x05\t\x05\v\x01\x02\r\x02\x01\x01\x02\x03\x01"},
+ {"crypto/x509/internal/macos", "\x03k'\x8f\x01\v\x10\x06"},
+ {"crypto/x509/pkix", "d\x06\a\x88\x01F"},
+ {"database/sql", "\x03\nK\x16\x03z\f\x06\"\x05\t\x02\x03\x01\f\x02\x02\x02"},
+ {"database/sql/driver", "\ra\x03\xae\x01\x10\x10"},
+ {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03`\x18\x02\x01+\x10\x1e"},
+ {"debug/dwarf", "\x03d\a\x03z1\x12\x01\x01"},
+ {"debug/elf", "\x03\x06Q\r\a\x03`\x19\x01,\x18\x01\x15"},
+ {"debug/gosym", "\x03d\n\xbd\x01\x01\x01\x02"},
+ {"debug/macho", "\x03\x06Q\r\n`\x1a,\x18\x01"},
+ {"debug/pe", "\x03\x06Q\r\a\x03`\x1a,\x18\x01\x15"},
+ {"debug/plan9obj", "g\a\x03`\x1a,"},
+ {"embed", "n+:\x18\x01S"},
+ {"embed/internal/embedtest", ""},
+ {"encoding", ""},
+ {"encoding/ascii85", "\xeb\x01D"},
+ {"encoding/asn1", "\x03k\x03\x87\x01\x01&\x0e\x02\x01\x0f\x03\x01"},
+ {"encoding/base32", "\xeb\x01B\x02"},
+ {"encoding/base64", "\x9a\x01QB\x02"},
+ {"encoding/binary", "n}\r'\x0e\x05"},
+ {"encoding/csv", "\x02\x01k\x03zE\x11\x02"},
+ {"encoding/gob", "\x02`\x05\a\x03`\x1a\f\x01\x02\x1d\b\x13\x01\x0e\x02"},
+ {"encoding/hex", "n\x03zB\x03"},
+ {"encoding/json", "\x03\x01^\x04\b\x03z\r'\x0e\x02\x01\x02\x0f\x01\x01\x02"},
+ {"encoding/pem", "\x03c\b}B\x03"},
+ {"encoding/xml", "\x02\x01_\f\x03z4\x05\v\x01\x02\x0f\x02"},
+ {"errors", "\xca\x01{"},
+ {"expvar", "kK9\t\n\x15\r\t\x02\x03\x01\x10"},
+ {"flag", "b\f\x03z,\b\x05\t\x02\x01\x0f"},
+ {"fmt", "nE8\r\x1f\b\x0e\x02\x03\x11"},
+ {"go/ast", "\x03\x01m\x0f\x01j\x03)\b\x0e\x02\x01"},
+ {"go/ast/internal/tests", ""},
+ {"go/build", "\x02\x01k\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\t\x02\x01\x11\x02\x02"},
+ {"go/build/constraint", "n\xc1\x01\x01\x11\x02"},
+ {"go/constant", "q\x10w\x01\x015\x01\x02\x11"},
+ {"go/doc", "\x04m\x01\x06\t=-1\x11\x02\x01\x11\x02"},
+ {"go/doc/comment", "\x03n\xbc\x01\x01\x01\x01\x11\x02"},
+ {"go/format", "\x03n\x01\f\x01\x02jE"},
+ {"go/importer", "t\a\x01\x01\x04\x01i9"},
+ {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x05\v\x01g\x02,\x01\x05\x12\x01\v\b"},
+ {"go/internal/gcimporter", "\x02o\x10\x01/\x05\x0e',\x16\x03\x02"},
+ {"go/internal/srcimporter", "q\x01\x02\n\x03\x01i,\x01\x05\x13\x02\x13"},
+ {"go/parser", "\x03k\x03\x01\x03\v\x01j\x01+\x06\x13"},
+ {"go/printer", "q\x01\x03\x03\tj\r\x1f\x16\x02\x01\x02\n\x05\x02"},
+ {"go/scanner", "\x03n\x10j2\x11\x01\x12\x02"},
+ {"go/token", "\x04m\xbc\x01\x02\x03\x01\x0e\x02"},
+ {"go/types", "\x03\x01\x06d\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\t\x01\x01\x01\x02\x01\x0e\x02\x02"},
+ {"go/version", "\xbb\x01u"},
+ {"hash", "\xeb\x01"},
+ {"hash/adler32", "n\x16\x16"},
+ {"hash/crc32", "n\x16\x16\x14\x84\x01\x01"},
+ {"hash/crc64", "n\x16\x16\x98\x01"},
+ {"hash/fnv", "n\x16\x16`"},
+ {"hash/maphash", "\x95\x01\x05\x1b\x03@M"},
+ {"html", "\xb0\x02\x02\x11"},
+ {"html/template", "\x03h\x06\x19,5\x01\v \x05\x01\x02\x03\r\x01\x02\v\x01\x03\x02"},
+ {"image", "\x02l\x1f^\x0f5\x03\x01"},
+ {"image/color", ""},
+ {"image/color/palette", "\x8d\x01"},
+ {"image/draw", "\x8c\x01\x01\x04"},
+ {"image/gif", "\x02\x01\x05f\x03\x1b\x01\x01\x01\vQ"},
+ {"image/internal/imageutil", "\x8c\x01"},
+ {"image/jpeg", "\x02l\x1e\x01\x04Z"},
+ {"image/png", "\x02\a^\n\x13\x02\x06\x01^D"},
+ {"index/suffixarray", "\x03d\a}\r*\v\x01"},
+ {"internal/abi", "\xb5\x01\x90\x01"},
+ {"internal/asan", "\xc5\x02"},
+ {"internal/bisect", "\xa4\x02\x0e\x01"},
+ {"internal/buildcfg", "qG_\x06\x02\x05\v\x01"},
+ {"internal/bytealg", "\xae\x01\x97\x01"},
+ {"internal/byteorder", ""},
+ {"internal/cfg", ""},
+ {"internal/chacha8rand", "\x9a\x01\x1b\x90\x01"},
+ {"internal/copyright", ""},
+ {"internal/coverage", ""},
+ {"internal/coverage/calloc", ""},
+ {"internal/coverage/cfile", "k\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01$\x01\x1e,\x06\a\v\x01\x03\f\x06"},
+ {"internal/coverage/cformat", "\x04m-\x04I\f6\x01\x02\f"},
+ {"internal/coverage/cmerge", "q-Z"},
+ {"internal/coverage/decodecounter", "g\n-\v\x02@,\x18\x16"},
+ {"internal/coverage/decodemeta", "\x02e\n\x17\x16\v\x02@,"},
+ {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02>\f \x16"},
+ {"internal/coverage/encodemeta", "\x02\x01d\n\x13\x04\x16\r\x02>,."},
+ {"internal/coverage/pods", "\x04m-y\x06\x05\v\x02\x01"},
+ {"internal/coverage/rtcov", "\xc5\x02"},
+ {"internal/coverage/slicereader", "g\nzZ"},
+ {"internal/coverage/slicewriter", "qz"},
+ {"internal/coverage/stringtab", "q8\x04>"},
+ {"internal/coverage/test", ""},
+ {"internal/coverage/uleb128", ""},
+ {"internal/cpu", "\xc5\x02"},
+ {"internal/dag", "\x04m\xbc\x01\x03"},
+ {"internal/diff", "\x03n\xbd\x01\x02"},
+ {"internal/exportdata", "\x02\x01k\x03\x03]\x1a,\x01\x05\x12\x01\x02"},
+ {"internal/filepathlite", "n+:\x19A"},
+ {"internal/fmtsort", "\x04\x9b\x02\x0e"},
+ {"internal/fuzz", "\x03\nA\x19\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\v\x01\x02\x01\x01\v\x04\x02"},
+ {"internal/goarch", ""},
+ {"internal/godebug", "\x97\x01 {\x01\x12"},
+ {"internal/godebugs", ""},
+ {"internal/goexperiment", ""},
+ {"internal/goos", ""},
+ {"internal/goroot", "\x97\x02\x01\x05\x13\x02"},
+ {"internal/gover", "\x04"},
+ {"internal/goversion", ""},
+ {"internal/itoa", ""},
+ {"internal/lazyregexp", "\x97\x02\v\x0e\x02"},
+ {"internal/lazytemplate", "\xeb\x01,\x19\x02\v"},
+ {"internal/msan", "\xc5\x02"},
+ {"internal/nettrace", ""},
+ {"internal/obscuretestdata", "f\x85\x01,"},
+ {"internal/oserror", "n"},
+ {"internal/pkgbits", "\x03K\x19\a\x03\x05\vj\x0e\x1e\r\v\x01"},
+ {"internal/platform", ""},
+ {"internal/poll", "nO\x1a\x149\x0e\x01\x01\v\x06"},
+ {"internal/profile", "\x03\x04g\x03z7\f\x01\x01\x0f"},
+ {"internal/profilerecord", ""},
+ {"internal/race", "\x95\x01\xb0\x01"},
+ {"internal/reflectlite", "\x95\x01 3\x01P\x0e\x13\x12"},
+ {"unsafe", ""},
+ {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x8c\x01*&"},
+ {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xd8\x01\x04\x01"},
+ {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x88\x01& \n"},
+ {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""},
+ {"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"},
+ {"vendor/golang.org/x/crypto/internal/poly1305", "Q\x16\x93\x01"},
+ {"vendor/golang.org/x/net/dns/dnsmessage", "n"},
+ {"vendor/golang.org/x/net/http/httpguts", "\x81\x02\x14\x1b\x13\r"},
+ {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x90\x01\x15\x01\x19\x13\r"},
+ {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03zG"},
+ {"vendor/golang.org/x/net/idna", "q\x87\x018\x13\x10\x02\x01"},
+ {"vendor/golang.org/x/net/nettest", "\x03d\a\x03z\x11\x05\x16\x01\f\v\x01\x02\x02\x01\n"},
+ {"vendor/golang.org/x/sys/cpu", "\x97\x02\r\v\x01\x15"},
+ {"vendor/golang.org/x/text/secure/bidirule", "n\xd5\x01\x11\x01"},
+ {"vendor/golang.org/x/text/transform", "\x03k}X"},
+ {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf~?\x15"},
+ {"vendor/golang.org/x/text/unicode/norm", "g\nzG\x11\x11"},
+ {"weak", "\x95\x01\x8f\x01!"},
+}
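
Each deps string is a run of uvarint-encoded deltas between successive sorted indices into the same table, which the decoder replays with binary.Uvarint. A sketch of the corresponding encoding, written for illustration rather than taken from the generator:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeDeps turns a sorted list of dependency indices into the
// varint-delta layout the deps table stores as a string.
func encodeDeps(indices []int) string {
	var buf []byte
	prev := 0
	for _, i := range indices {
		buf = binary.AppendUvarint(buf, uint64(i-prev))
		prev = i
	}
	return string(buf)
}

func main() {
	// Deltas 3, 104, 18 encode as \x03, 'h', \x12.
	fmt.Printf("%q\n", encodeDeps([]int{3, 107, 125})) // "\x03h\x12"
}
```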
diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/import.go b/tools/vendor/golang.org/x/tools/internal/stdlib/import.go
new file mode 100644
index 000000000..f6909878a
--- /dev/null
+++ b/tools/vendor/golang.org/x/tools/internal/stdlib/import.go
@@ -0,0 +1,89 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stdlib
+
+// This file provides the API for the import graph of the standard library.
+//
+// Be aware that the compiler-generated code for every package
+// implicitly depends on package "runtime" and a handful of others
+// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go).
+
+import (
+ "encoding/binary"
+ "iter"
+ "slices"
+ "strings"
+)
+
+// Imports returns the sequence of packages directly imported by the
+// named standard packages, in name order.
+// The imports of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Imports(pkgs ...string) iter.Seq[string] {
+ return func(yield func(string) bool) {
+ for _, pkg := range pkgs {
+ if i, ok := find(pkg); ok {
+ var depIndex uint64
+ for data := []byte(deps[i].deps); len(data) > 0; {
+ delta, n := binary.Uvarint(data)
+ depIndex += delta
+ if !yield(deps[depIndex].name) {
+ return
+ }
+ data = data[n:]
+ }
+ }
+ }
+ }
+}
+
+// Dependencies returns the set of all dependencies of the named
+// standard packages, including the initial package,
+// in a deterministic topological order.
+// The dependencies of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Dependencies(pkgs ...string) iter.Seq[string] {
+ return func(yield func(string) bool) {
+ for _, pkg := range pkgs {
+ if i, ok := find(pkg); ok {
+ var seen [1 + len(deps)/8]byte // bit set of seen packages
+ var visit func(i int) bool
+ visit = func(i int) bool {
+ bit := byte(1) << (i % 8)
+ if seen[i/8]&bit == 0 {
+ seen[i/8] |= bit
+ var depIndex uint64
+ for data := []byte(deps[i].deps); len(data) > 0; {
+ delta, n := binary.Uvarint(data)
+ depIndex += delta
+ if !visit(int(depIndex)) {
+ return false
+ }
+ data = data[n:]
+ }
+ if !yield(deps[i].name) {
+ return false
+ }
+ }
+ return true
+ }
+ if !visit(i) {
+ return
+ }
+ }
+ }
+ }
+}
+
+// find returns the index of pkg in the deps table.
+func find(pkg string) (int, bool) {
+ return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int {
+ return strings.Compare(p.name, n)
+ })
+}
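
A short usage sketch of this iterator-based API. Note that these are internal packages of x/tools, so the import is only legal inside that module, and the package argument is arbitrary:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib"
)

func main() {
	// Direct imports of net/http, in name order.
	for pkg := range stdlib.Imports("net/http") {
		fmt.Println(pkg)
	}

	// Full transitive closure, including net/http itself, in a
	// deterministic topological order.
	for pkg := range stdlib.Dependencies("net/http") {
		fmt.Println(pkg)
	}
}
```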
diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index 9f0b871ff..00776a31b 100644
--- a/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Go Authors. All rights reserved.
+// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -2151,6 +2151,8 @@ var PackageSymbols = map[string][]Symbol{
{"(Type).String", Method, 0},
{"(Version).GoString", Method, 0},
{"(Version).String", Method, 0},
+ {"(VersionIndex).Index", Method, 24},
+ {"(VersionIndex).IsHidden", Method, 24},
{"ARM_MAGIC_TRAMP_NUMBER", Const, 0},
{"COMPRESS_HIOS", Const, 6},
{"COMPRESS_HIPROC", Const, 6},
@@ -3834,6 +3836,7 @@ var PackageSymbols = map[string][]Symbol{
{"SymType", Type, 0},
{"SymVis", Type, 0},
{"Symbol", Type, 0},
+ {"Symbol.HasVersion", Field, 24},
{"Symbol.Info", Field, 0},
{"Symbol.Library", Field, 13},
{"Symbol.Name", Field, 0},
@@ -3843,18 +3846,12 @@ var PackageSymbols = map[string][]Symbol{
{"Symbol.Value", Field, 0},
{"Symbol.Version", Field, 13},
{"Symbol.VersionIndex", Field, 24},
- {"Symbol.VersionScope", Field, 24},
- {"SymbolVersionScope", Type, 24},
{"Type", Type, 0},
{"VER_FLG_BASE", Const, 24},
{"VER_FLG_INFO", Const, 24},
{"VER_FLG_WEAK", Const, 24},
{"Version", Type, 0},
- {"VersionScopeGlobal", Const, 24},
- {"VersionScopeHidden", Const, 24},
- {"VersionScopeLocal", Const, 24},
- {"VersionScopeNone", Const, 24},
- {"VersionScopeSpecific", Const, 24},
+ {"VersionIndex", Type, 24},
},
"debug/gosym": {
{"(*DecodingError).Error", Method, 0},
@@ -7122,6 +7119,7 @@ var PackageSymbols = map[string][]Symbol{
{"FormatFileInfo", Func, 21},
{"Glob", Func, 16},
{"GlobFS", Type, 16},
+ {"Lstat", Func, 25},
{"ModeAppend", Const, 16},
{"ModeCharDevice", Const, 16},
{"ModeDevice", Const, 16},
@@ -7146,6 +7144,8 @@ var PackageSymbols = map[string][]Symbol{
{"ReadDirFile", Type, 16},
{"ReadFile", Func, 16},
{"ReadFileFS", Type, 16},
+ {"ReadLink", Func, 25},
+ {"ReadLinkFS", Type, 25},
{"SkipAll", Var, 20},
{"SkipDir", Var, 16},
{"Stat", Func, 16},
@@ -9149,6 +9149,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*ProcessState).SysUsage", Method, 0},
{"(*ProcessState).SystemTime", Method, 0},
{"(*ProcessState).UserTime", Method, 0},
+ {"(*Root).Chmod", Method, 25},
+ {"(*Root).Chown", Method, 25},
{"(*Root).Close", Method, 24},
{"(*Root).Create", Method, 24},
{"(*Root).FS", Method, 24},
@@ -16757,9 +16759,11 @@ var PackageSymbols = map[string][]Symbol{
},
"testing/fstest": {
{"(MapFS).Glob", Method, 16},
+ {"(MapFS).Lstat", Method, 25},
{"(MapFS).Open", Method, 16},
{"(MapFS).ReadDir", Method, 16},
{"(MapFS).ReadFile", Method, 16},
+ {"(MapFS).ReadLink", Method, 25},
{"(MapFS).Stat", Method, 16},
{"(MapFS).Sub", Method, 16},
{"MapFS", Type, 16},
diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
index 98904017f..3d96d3bf6 100644
--- a/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
+++ b/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
@@ -6,7 +6,7 @@
// Package stdlib provides a table of all exported symbols in the
// standard library, along with the version at which they first
-// appeared.
+// appeared. It also provides the import graph of std packages.
package stdlib
import (
diff --git a/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go
index 93c80fdc9..f49802b8e 100644
--- a/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go
+++ b/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -120,7 +120,7 @@ type termSet struct {
terms termlist
}
-func indentf(depth int, format string, args ...interface{}) {
+func indentf(depth int, format string, args ...any) {
fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
}
diff --git a/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go b/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go
index 345348796..edf0347ec 100644
--- a/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -32,12 +32,14 @@ func SetUsesCgo(conf *types.Config) bool {
return true
}
-// ReadGo116ErrorData extracts additional information from types.Error values
+// ErrorCodeStartEnd extracts additional information from types.Error values
// generated by Go version 1.16 and later: the error code, start position, and
// end position. If all positions are valid, start <= err.Pos <= end.
//
// If the data could not be read, the final result parameter will be false.
-func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
+//
+// TODO(adonovan): eliminate start/end when proposal #71803 is accepted.
+func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
var data [3]int
// By coincidence all of these fields are ints, which simplifies things.
v := reflect.ValueOf(err)
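
A hedged sketch of calling the renamed helper from a type-checking driver (the driver itself is hypothetical, and the import is again only legal inside the x/tools module):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

func main() {
	fset := token.NewFileSet()
	f, _ := parser.ParseFile(fset, "x.go", `package p; var x int = "s"`, 0)
	conf := types.Config{
		Importer: importer.Default(),
		Error: func(err error) {
			if terr, ok := err.(types.Error); ok {
				if code, start, end, ok := typesinternal.ErrorCodeStartEnd(terr); ok {
					fmt.Printf("code=%d at %v-%v\n",
						code, fset.Position(start), fset.Position(end))
				}
			}
		},
	}
	conf.Check("p", fset, []*ast.File{f}, nil) // errors reach the callback
}
```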
diff --git a/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md b/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 0854d298e..d9bfa6e1e 100644
--- a/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR
organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
-If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
+If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
## Legal requirements
@@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly.
is a great place to start. These issues are well-documented and usually can be
resolved with a single pull request.
-- If you are adding a new file, make sure it has the copyright message template
- at the top as a comment. You can copy over the message from an existing file
+- If you are adding a new file, make sure it has the copyright message template
+ at the top as a comment. You can copy over the message from an existing file
and update the year.
- The grpc package should only depend on standard Go packages and a small number
@@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly.
proposal](https://github.com/grpc/proposal).
- Provide a good **PR description** as a record of **what** change is being made
- and **why** it was made. Link to a github issue if it exists.
+ and **why** it was made. Link to a GitHub issue if it exists.
-- If you want to fix formatting or style, consider whether your changes are an
- obvious improvement or might be considered a personal preference. If a style
- change is based on preference, it likely will not be accepted. If it corrects
- widely agreed-upon anti-patterns, then please do create a PR and explain the
+- If you want to fix formatting or style, consider whether your changes are an
+ obvious improvement or might be considered a personal preference. If a style
+ change is based on preference, it likely will not be accepted. If it corrects
+ widely agreed-upon anti-patterns, then please do create a PR and explain the
benefits of the change.
- Unless your PR is trivial, you should expect there will be reviewer comments
diff --git a/tools/vendor/google.golang.org/grpc/MAINTAINERS.md b/tools/vendor/google.golang.org/grpc/MAINTAINERS.md
index 6a8a07781..5d4096d46 100644
--- a/tools/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/tools/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -9,21 +9,28 @@ for general contribution guidelines.
## Maintainers (in alphabetical order)
+- [aranjans](https://github.com/aranjans), Google LLC
+- [arjan-bal](https://github.com/arjan-bal), Google LLC
+- [arvindbr8](https://github.com/arvindbr8), Google LLC
- [atollena](https://github.com/atollena), Datadog, Inc.
-- [cesarghali](https://github.com/cesarghali), Google LLC
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
-- [menghanl](https://github.com/menghanl), Google LLC
-- [srini100](https://github.com/srini100), Google LLC
+- [erm-g](https://github.com/erm-g), Google LLC
+- [gtcooke94](https://github.com/gtcooke94), Google LLC
+- [purnesh42h](https://github.com/purnesh42h), Google LLC
+- [zasweq](https://github.com/zasweq), Google LLC
## Emeritus Maintainers (in alphabetical order)
-- [adelez](https://github.com/adelez), Google LLC
-- [canguler](https://github.com/canguler), Google LLC
-- [iamqizhao](https://github.com/iamqizhao), Google LLC
-- [jadekler](https://github.com/jadekler), Google LLC
-- [jtattermusch](https://github.com/jtattermusch), Google LLC
-- [lyuxuan](https://github.com/lyuxuan), Google LLC
-- [makmukhi](https://github.com/makmukhi), Google LLC
-- [matt-kwong](https://github.com/matt-kwong), Google LLC
-- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
-- [yongni](https://github.com/yongni), Google LLC
+- [adelez](https://github.com/adelez)
+- [canguler](https://github.com/canguler)
+- [cesarghali](https://github.com/cesarghali)
+- [iamqizhao](https://github.com/iamqizhao)
+- [jeanbza](https://github.com/jeanbza)
+- [jtattermusch](https://github.com/jtattermusch)
+- [lyuxuan](https://github.com/lyuxuan)
+- [makmukhi](https://github.com/makmukhi)
+- [matt-kwong](https://github.com/matt-kwong)
+- [menghanl](https://github.com/menghanl)
+- [nicolasnoble](https://github.com/nicolasnoble)
+- [srini100](https://github.com/srini100)
+- [yongni](https://github.com/yongni)
diff --git a/tools/vendor/google.golang.org/grpc/SECURITY.md b/tools/vendor/google.golang.org/grpc/SECURITY.md
index be6e10870..abab27937 100644
--- a/tools/vendor/google.golang.org/grpc/SECURITY.md
+++ b/tools/vendor/google.golang.org/grpc/SECURITY.md
@@ -1,3 +1,3 @@
# Security Policy
-For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/tools/vendor/google.golang.org/grpc/backoff/backoff.go b/tools/vendor/google.golang.org/grpc/backoff/backoff.go
index 0787d0b50..d7b40b7cb 100644
--- a/tools/vendor/google.golang.org/grpc/backoff/backoff.go
+++ b/tools/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -39,7 +39,7 @@ type Config struct {
MaxDelay time.Duration
}
-// DefaultConfig is a backoff configuration with the default values specfied
+// DefaultConfig is a backoff configuration with the default values specified
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This should be useful for callers who want to configure backoff with
diff --git a/tools/vendor/google.golang.org/grpc/balancer/balancer.go b/tools/vendor/google.golang.org/grpc/balancer/balancer.go
index f391744f7..3a2092f10 100644
--- a/tools/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/tools/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -30,6 +30,7 @@ import (
"google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
+ estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
@@ -72,8 +73,21 @@ func unregisterForTesting(name string) {
delete(m, name)
}
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+ return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+ scs.connectedAddress = addr
+}
+
func init() {
internal.BalancerUnregister = unregisterForTesting
+ internal.ConnectedAddress = connectedAddress
+ internal.SetConnectedAddress = setConnectedAddress
}
// Get returns the resolver builder registered with the given name.
@@ -116,7 +130,7 @@ type SubConn interface {
// UpdateAddresses updates the addresses used in this SubConn.
// gRPC checks if currently-connected address is still in the new list.
// If it's in the list, the connection will be kept.
- // If it's not in the list, the connection will gracefully closed, and
+ // If it's not in the list, the connection will gracefully close, and
// a new connection will be created.
//
// This will trigger a state transition for the SubConn.
@@ -128,8 +142,11 @@ type SubConn interface {
Connect()
// GetOrBuildProducer returns a reference to the existing Producer for this
// ProducerBuilder in this SubConn, or, if one does not currently exist,
- // creates a new one and returns it. Returns a close function which must
- // be called when the Producer is no longer needed.
+ // creates a new one and returns it. Returns a close function which may be
+ // called when the Producer is no longer needed. Otherwise the producer
+ // will automatically be closed upon connection loss or subchannel close.
+ // Should only be called on a SubConn in state Ready. Otherwise the
+ // producer will be unable to create streams.
GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
// allowed to complete. No future calls should be made on the SubConn.
@@ -243,6 +260,10 @@ type BuildOptions struct {
// same resolver.Target as passed to the resolver. See the documentation for
// the resolver.Target type for details about what it contains.
Target resolver.Target
+ // MetricsRecorder is the metrics recorder that balancers can use to record
+ // metrics. Balancer implementations that neither register metrics with
+ // the metrics registry nor record on them can ignore this field.
+ MetricsRecorder estats.MetricsRecorder
}
// Builder creates a balancer.
@@ -410,6 +431,9 @@ type SubConnState struct {
// ConnectionError is set if the ConnectivityState is TransientFailure,
// describing the reason the SubConn failed. Otherwise, it is nil.
ConnectionError error
+ // connectedAddress contains the connected address when ConnectivityState is
+ // Ready. Otherwise, it is indeterminate.
+ connectedAddress resolver.Address
}
// ClientConnState describes the state of a ClientConn relevant to the
@@ -431,8 +455,10 @@ type ProducerBuilder interface {
// Build creates a Producer. The first parameter is always a
// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
// associated SubConn), but is declared as `any` to avoid a dependency
- // cycle. Should also return a close function that will be called when all
- // references to the Producer have been given up.
+ // cycle. Build also returns a close function that will be called when all
+ // references to the Producer have been given up for a SubConn, or when a
+ // connectivity state change occurs on the SubConn. The close function
+ // should always block until all asynchronous cleanup work is completed.
Build(grpcClientConnInterface any) (p Producer, close func())
}
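
A minimal ProducerBuilder sketch honoring the documented close contract, with a hypothetical producer that runs one background goroutine and a close function that blocks until it exits:

```go
package main

import (
	"sync"

	"google.golang.org/grpc/balancer"
)

// watchProducer is a hypothetical producer owning one background
// goroutine per SubConn.
type watchProducer struct {
	stop chan struct{}
	wg   sync.WaitGroup
}

type watchBuilder struct{}

// Build starts the producer's background work and returns a close
// function that blocks until the asynchronous cleanup is done, as the
// contract above requires.
func (watchBuilder) Build(grpcClientConnInterface any) (balancer.Producer, func()) {
	p := &watchProducer{stop: make(chan struct{})}
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		<-p.stop // stand-in for real stream-driving work
	}()
	return p, func() {
		close(p.stop)
		p.wg.Wait()
	}
}

func main() {
	var _ balancer.ProducerBuilder = watchBuilder{} // compile-time check
}
```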
diff --git a/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go b/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go
index a7f1eeec8..d5ed172ae 100644
--- a/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -36,7 +36,7 @@ type baseBuilder struct {
config Config
}
-func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
bal := &baseBalancer{
cc: cc,
pickerBuilder: bb.pickerBuilder,
@@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
}
}
// If resolver state contains no addresses, return an error so ClientConn
- // will trigger re-resolve. Also records this as an resolver error, so when
+ // will trigger re-resolve. Also records this as a resolver error, so when
// the overall state turns transient failure, the error message will have
// the zero address information.
if len(s.ResolverState.Addresses) == 0 {
@@ -259,6 +259,6 @@ type errPicker struct {
err error // Pick() always returns this err.
}
-func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
return balancer.PickResult{}, p.err
}
diff --git a/tools/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
new file mode 100644
index 000000000..c51978945
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code internal to the pickfirst package.
+package internal
+
+import "math/rand"
+
+// RandShuffle pseudo-randomizes the order of addresses.
+var RandShuffle = rand.Shuffle
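
RandShuffle is a test seam: production code calls it where it would call rand.Shuffle, and tests can swap in a deterministic implementation. A hedged sketch of such a test (only compilable inside the grpc module; the fixed rotation is invented for the example):

```go
package internal_test

import (
	"testing"

	"google.golang.org/grpc/balancer/pickfirst/internal"
)

func TestDeterministicShuffle(t *testing.T) {
	orig := internal.RandShuffle
	defer func() { internal.RandShuffle = orig }()

	// Swap in a fixed rotation so the outcome is reproducible.
	internal.RandShuffle = func(n int, swap func(i, j int)) {
		for i := 0; i < n-1; i++ {
			swap(i, i+1)
		}
	}

	got := []string{"a", "b", "c"}
	internal.RandShuffle(len(got), func(i, j int) { got[i], got[j] = got[j], got[i] })
	if want := "b"; got[0] != want {
		t.Fatalf("got %v, want %q first", got, want)
	}
}
```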
diff --git a/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index 07527603f..e069346a7 100644
--- a/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
+++ b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -26,18 +26,23 @@ import (
"math/rand"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst/internal"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
+
+ _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
)
func init() {
+ if envconfig.NewPickFirstEnabled {
+ return
+ }
balancer.Register(pickfirstBuilder{})
- internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
}
var logger = grpclog.Component("pick-first-lb")
@@ -50,7 +55,7 @@ const (
type pickfirstBuilder struct{}
-func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
b := &pickfirstBalancer{cc: cc}
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
@@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) {
})
}
+// Shuffler is an interface for shuffling an address list.
type Shuffler interface {
ShuffleAddressListForTesting(n int, swap func(i, j int))
}
+// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
+// is the number of elements. swap swaps the elements with indexes i and j.
func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
@@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
- internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+ internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}
// "Flatten the list by concatenating the ordered list of addresses for each
@@ -155,7 +163,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// Endpoints not set, process addresses until we migrate resolver
// emissions fully to Endpoints. The top channel does wrap emitted
// addresses with endpoints, however some balancers such as weighted
- // target do not forwarrd the corresponding correct endpoints down/split
+ // target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
addrs = state.ResolverState.Addresses
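
The shuffle is switched on by the pick_first service config. A sketch of the JSON knob as a plain unmarshal, with a local struct mirroring pfConfig's JSON surface:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// lbConfig mirrors the shape of pfConfig's JSON surface.
type lbConfig struct {
	ShuffleAddressList bool `json:"shuffleAddressList"`
}

func main() {
	// The value found under the "pick_first" key of a service config's
	// loadBalancingConfig list.
	raw := []byte(`{"shuffleAddressList": true}`)
	var c lbConfig
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.ShuffleAddressList) // true
}
```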
diff --git a/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
new file mode 100644
index 000000000..985b6edc7
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
@@ -0,0 +1,625 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pickfirstleaf contains the pick_first load balancing policy which
+// will be the universal leaf policy after dualstack changes are implemented.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package pickfirstleaf
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst/internal"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/envconfig"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+func init() {
+ if envconfig.NewPickFirstEnabled {
+ // Register as the default pick_first balancer.
+ Name = "pick_first"
+ }
+ balancer.Register(pickfirstBuilder{})
+}
+
+var (
+ logger = grpclog.Component("pick-first-leaf-lb")
+ // Name is the name of the pick_first_leaf balancer.
+ // It is changed to "pick_first" in init() if this balancer is to be
+ // registered as the default pickfirst.
+ Name = "pick_first_leaf"
+)
+
+// TODO: change to pick-first when this becomes the default pick_first policy.
+const logPrefix = "[pick-first-leaf-lb %p] "
+
+type pickfirstBuilder struct{}
+
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
+ b := &pickfirstBalancer{
+ cc: cc,
+ addressList: addressList{},
+ subConns: resolver.NewAddressMap(),
+ state: connectivity.Connecting,
+ mu: sync.Mutex{},
+ }
+ b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+ return b
+}
+
+func (b pickfirstBuilder) Name() string {
+ return Name
+}
+
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ var cfg pfConfig
+ if err := json.Unmarshal(js, &cfg); err != nil {
+ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+ }
+ return cfg, nil
+}
+
+type pfConfig struct {
+ serviceconfig.LoadBalancingConfig `json:"-"`
+
+ // If set to true, instructs the LB policy to shuffle the order of the list
+ // of endpoints received from the name resolver before attempting to
+ // connect to them.
+ ShuffleAddressList bool `json:"shuffleAddressList"`
+}
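// Illustrative sketch (not part of the patch): ParseConfig receives only the
// JSON object nested under this policy's name in the service config. A
// hypothetical service config that enables shuffling:
//
//	{
//	  "loadBalancingConfig": [
//	    {"pick_first_leaf": {"shuffleAddressList": true}}
//	  ]
//	}
//
// ParseConfig itself sees `{"shuffleAddressList": true}`, which unmarshals
// into pfConfig above.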
+
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+ // The following fields are initialized at build time and read-only after
+ // that.
+ subConn balancer.SubConn
+ addr resolver.Address
+
+ state connectivity.State
+ lastErr error
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+ sd := &scData{
+ state: connectivity.Idle,
+ addr: addr,
+ }
+ sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+ StateListener: func(state balancer.SubConnState) {
+ b.updateSubConnState(sd, state)
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ sd.subConn = sc
+ return sd, nil
+}
+
+type pickfirstBalancer struct {
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+ logger *internalgrpclog.PrefixLogger
+ cc balancer.ClientConn
+
+ // The mutex is used to synchronize updates triggered from the idle
+ // picker with the already serialized resolver and SubConn state updates.
+ mu sync.Mutex
+ state connectivity.State
+ // scData for active subconns mapped by address.
+ subConns *resolver.AddressMap
+ addressList addressList
+ firstPass bool
+ numTF int
+}
+
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determines the resolver update to be invalid.
+func (b *pickfirstBalancer) ResolverError(err error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
+ if b.logger.V(2) {
+ b.logger.Infof("Received error from the name resolver: %v", err)
+ }
+
+ // The picker will not change since the balancer does not currently
+ // report an error. If the balancer hasn't received a single good resolver
+ // update yet, transition to TRANSIENT_FAILURE.
+ if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+ if b.logger.V(2) {
+ b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+ }
+ return
+ }
+
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
+ })
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+ // Cleanup state pertaining to the previous resolver state.
+ // Treat an empty address list like an error by calling b.ResolverError.
+ b.state = connectivity.TransientFailure
+ b.closeSubConnsLocked()
+ b.addressList.updateAddrs(nil)
+ b.resolverErrorLocked(errors.New("produced zero addresses"))
+ return balancer.ErrBadResolverState
+ }
+ cfg, ok := state.BalancerConfig.(pfConfig)
+ if state.BalancerConfig != nil && !ok {
+ return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
+ }
+
+ if b.logger.V(2) {
+ b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+ }
+
+ var newAddrs []resolver.Address
+ if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+ // Perform the optional shuffling described in gRFC A62. The shuffling
+ // will change the order of endpoints but not touch the order of the
+ // addresses within each endpoint. - A61
+ if cfg.ShuffleAddressList {
+ endpoints = append([]resolver.Endpoint{}, endpoints...)
+ internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+ }
+
+ // "Flatten the list by concatenating the ordered list of addresses for
+ // each of the endpoints, in order." - A61
+ for _, endpoint := range endpoints {
+ // "In the flattened list, interleave addresses from the two address
+ // families, as per RFC-8305 section 4." - A61
+ // TODO: support the above language.
+ newAddrs = append(newAddrs, endpoint.Addresses...)
+ }
+ } else {
+ // Endpoints not set, process addresses until we migrate resolver
+ // emissions fully to Endpoints. The top channel does wrap emitted
+ // addresses with endpoints, however some balancers such as weighted
+ // target do not forward the corresponding correct endpoints down/split
+ // endpoints properly. Once all balancers correctly forward endpoints
+ // down, can delete this else conditional.
+ newAddrs = state.ResolverState.Addresses
+ if cfg.ShuffleAddressList {
+ newAddrs = append([]resolver.Address{}, newAddrs...)
+ internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+ }
+ }
+
+ // If an address appears in multiple endpoints or in the same endpoint
+ // multiple times, we keep it only once. We will create only one SubConn
+ // for the address because an AddressMap is used to store SubConns.
+ // Not de-duplicating would result in attempting to connect to the same
+ // SubConn multiple times in the same pass. We don't want this.
+ newAddrs = deDupAddresses(newAddrs)
+
+ // Since we have a new set of addresses, we are again at first pass.
+ b.firstPass = true
+
+ // If the previous ready SubConn exists in new address list,
+ // keep this connection and don't create new SubConns.
+ prevAddr := b.addressList.currentAddress()
+ prevAddrsCount := b.addressList.size()
+ b.addressList.updateAddrs(newAddrs)
+ if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) {
+ return nil
+ }
+
+ b.reconcileSubConnsLocked(newAddrs)
+ // If it's the first resolver update or the balancer was already READY
+ // (but the new address list does not contain the ready SubConn) or
+ // CONNECTING, enter CONNECTING.
+ // We may be in TRANSIENT_FAILURE due to a previous empty address list;
+ // we should still enter CONNECTING because the sticky TF behaviour
+ // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+ // due to connectivity failures.
+ if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+ // Start connection attempt at first address.
+ b.state = connectivity.Connecting
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ b.requestConnectionLocked()
+ } else if b.state == connectivity.TransientFailure {
+ // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+ // we're READY. See A62.
+ b.requestConnectionLocked()
+ }
+ return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+ b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.closeSubConnsLocked()
+ b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() {
+ b.firstPass = true
+ b.requestConnectionLocked()
+ }
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+ for _, sd := range b.subConns.Values() {
+ sd.(*scData).subConn.Shutdown()
+ }
+ b.subConns = resolver.NewAddressMap()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+ seenAddrs := resolver.NewAddressMap()
+ retAddrs := []resolver.Address{}
+
+ for _, addr := range addrs {
+ if _, ok := seenAddrs.Get(addr); ok {
+ continue
+ }
+ seenAddrs.Set(addr, true)
+ retAddrs = append(retAddrs, addr)
+ }
+ return retAddrs
+}
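// Illustrative sketch (not part of the patch): with the seenAddrs.Set call
// added above, duplicates now collapse to a single entry, e.g.
//
//	in := []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.1:443"}}
//	out := deDupAddresses(in) // len(out) == 1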
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+// - closing subchannels: any existing subchannels associated with addresses
+// that are no longer in the updated list are shut down.
+// - removing subchannels: entries for these closed subchannels are removed
+// from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+ newAddrsMap := resolver.NewAddressMap()
+ for _, addr := range newAddrs {
+ newAddrsMap.Set(addr, true)
+ }
+
+ for _, oldAddr := range b.subConns.Keys() {
+ if _, ok := newAddrsMap.Get(oldAddr); ok {
+ continue
+ }
+ val, _ := b.subConns.Get(oldAddr)
+ val.(*scData).subConn.Shutdown()
+ b.subConns.Delete(oldAddr)
+ }
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other subConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+ for _, v := range b.subConns.Values() {
+ sd := v.(*scData)
+ if sd.subConn != selected.subConn {
+ sd.subConn.Shutdown()
+ }
+ }
+ b.subConns = resolver.NewAddressMap()
+ b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+ if !b.addressList.isValid() {
+ return
+ }
+ var lastErr error
+ for valid := true; valid; valid = b.addressList.increment() {
+ curAddr := b.addressList.currentAddress()
+ sd, ok := b.subConns.Get(curAddr)
+ if !ok {
+ var err error
+ // We want to assign the new scData to sd from the outer scope,
+ // hence we can't use := below.
+ sd, err = b.newSCData(curAddr)
+ if err != nil {
+ // This should never happen, unless the clientConn is being shut
+ // down.
+ if b.logger.V(2) {
+ b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+ }
+ // Do nothing, the LB policy will be closed soon.
+ return
+ }
+ b.subConns.Set(curAddr, sd)
+ }
+
+ scd := sd.(*scData)
+ switch scd.state {
+ case connectivity.Idle:
+ scd.subConn.Connect()
+ case connectivity.TransientFailure:
+ // Try the next address.
+ lastErr = scd.lastErr
+ continue
+ case connectivity.Ready:
+ // Should never happen.
+ b.logger.Errorf("Requesting a connection even though we have a READY SubConn")
+ case connectivity.Shutdown:
+ // Should never happen.
+ b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map")
+ case connectivity.Connecting:
+ // Wait for the SubConn to report success or failure.
+ }
+ return
+ }
+ // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+ // first pass.
+ b.endFirstPassLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ oldState := sd.state
+ sd.state = newState.ConnectivityState
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes this
+ // SubConn.
+ if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd {
+ return
+ }
+ if newState.ConnectivityState == connectivity.Shutdown {
+ return
+ }
+
+ if newState.ConnectivityState == connectivity.Ready {
+ b.shutdownRemainingLocked(sd)
+ if !b.addressList.seekTo(sd.addr) {
+ // This should not fail as we should have only one SubConn after
+ // entering READY. The SubConn should be present in the addressList.
+ b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
+ return
+ }
+ b.state = connectivity.Ready
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+ })
+ return
+ }
+
+ // If the LB policy is READY, and it receives a subchannel state change,
+ // it means that the READY subchannel has failed.
+ // A SubConn can also transition from CONNECTING directly to IDLE when
+ // a transport is successfully created, but the connection fails
+ // before the SubConn can send the notification for READY. We treat
+ // this as a successful connection and transition to IDLE.
+ if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+ // Once a transport fails, the balancer enters IDLE and starts from
+ // the first address when the picker is used.
+ b.shutdownRemainingLocked(sd)
+ b.state = connectivity.Idle
+ b.addressList.reset()
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Idle,
+ Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+ })
+ return
+ }
+
+ if b.firstPass {
+ switch newState.ConnectivityState {
+ case connectivity.Connecting:
+ // The balancer can be in either IDLE, CONNECTING or
+ // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in
+ // TRANSIENT_FAILURE until it's READY. See A62.
+ // If the balancer is already in CONNECTING, no update is needed.
+ if b.state == connectivity.Idle {
+ b.state = connectivity.Connecting
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ }
+ case connectivity.TransientFailure:
+ sd.lastErr = newState.ConnectionError
+ // Since we're re-using common SubConns while handling resolver
+ // updates, we could receive an out of turn TRANSIENT_FAILURE from
+ // a pass over the previous address list. We ignore such updates.
+
+ if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+ return
+ }
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
+ return
+ }
+ // End of the first pass.
+ b.endFirstPassLocked(newState.ConnectionError)
+ }
+ return
+ }
+
+ // We have finished the first pass, keep re-connecting failing SubConns.
+ switch newState.ConnectivityState {
+ case connectivity.TransientFailure:
+ b.numTF = (b.numTF + 1) % b.subConns.Len()
+ sd.lastErr = newState.ConnectionError
+ if b.numTF%b.subConns.Len() == 0 {
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: newState.ConnectionError},
+ })
+ }
+ // We don't need to request re-resolution since the SubConn already
+ // does that before reporting TRANSIENT_FAILURE.
+ // TODO: #7534 - Move re-resolution requests from SubConn into
+ // pick_first.
+ case connectivity.Idle:
+ sd.subConn.Connect()
+ }
+}
+
+func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) {
+ b.firstPass = false
+ b.numTF = 0
+ b.state = connectivity.TransientFailure
+
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: lastErr},
+ })
+ // Start re-connecting all the SubConns that are already in IDLE.
+ for _, v := range b.subConns.Values() {
+ sd := v.(*scData)
+ if sd.state == connectivity.Idle {
+ sd.subConn.Connect()
+ }
+ }
+}
+
+type picker struct {
+ result balancer.PickResult
+ err error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+ exitIdle func()
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ i.exitIdle()
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a one-dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+ addresses []resolver.Address
+ idx int
+}
+
+func (al *addressList) isValid() bool {
+ return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+ return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+ if !al.isValid() {
+ return false
+ }
+ al.idx++
+ return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+ if !al.isValid() {
+ return resolver.Address{}
+ }
+ return al.addresses[al.idx]
+}
+
+// first returns the first address in the list. If the list is empty, it returns
+// an empty address instead.
+func (al *addressList) first() resolver.Address {
+ if len(al.addresses) == 0 {
+ return resolver.Address{}
+ }
+ return al.addresses[0]
+}
+
+func (al *addressList) reset() {
+ al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+ al.addresses = addrs
+ al.reset()
+}
+
+// seekTo returns false if the needle was not found, in which case the
+// current index is left unchanged.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+ for ai, addr := range al.addresses {
+ if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+ continue
+ }
+ al.idx = ai
+ return true
+ }
+ return false
+}
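// Illustrative sketch (not part of the patch): the canonical traversal of an
// addressList, as used by requestConnectionLocked above, is:
//
//	for valid := al.isValid(); valid; valid = al.increment() {
//		addr := al.currentAddress()
//		_ = addr // e.g. attempt a connection to addr
//	}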
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
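// Illustrative sketch (not part of the patch): two addresses differing only
// in BalancerAttributes compare equal here but not via resolver.Address.Equal.
// Assuming the google.golang.org/grpc/attributes package:
//
//	a := resolver.Address{Addr: "10.0.0.1:443"}
//	b := a
//	b.BalancerAttributes = attributes.New("weight", 10)
//	equalAddressIgnoringBalAttributes(&a, &b) // true
//	a.Equal(b)                                // false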
diff --git a/tools/vendor/google.golang.org/grpc/balancer_wrapper.go b/tools/vendor/google.golang.org/grpc/balancer_wrapper.go
index 4161fdf47..2a4f2878a 100644
--- a/tools/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/tools/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -24,13 +24,18 @@ import (
"sync"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/status"
)
+var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+
// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
// ccBalancerWrapper implements methods corresponding to the ones on the
@@ -79,6 +84,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParent: cc.channelz,
Target: cc.parsedTarget,
+ MetricsRecorder: cc.metricsRecorderList,
},
serializer: grpcsync.NewCallbackSerializer(ctx),
serializerCancel: cancel,
@@ -92,7 +98,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
// it is safe to call into the balancer here.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
errCh := make(chan error)
- ok := ccb.serializer.Schedule(func(ctx context.Context) {
+ uccs := func(ctx context.Context) {
defer close(errCh)
if ctx.Err() != nil || ccb.balancer == nil {
return
@@ -107,17 +113,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
}
errCh <- err
- })
- if !ok {
- return nil
}
+ onFailure := func() { close(errCh) }
+
+ // UpdateClientConnState can race with Close, and when the latter wins, the
+ // serializer is closed, and the attempt to schedule the callback will fail.
+ // It is acceptable to ignore this failure. But since we want to handle the
+ // state update in a blocking fashion (when we successfully schedule the
+ // callback), we have to use the ScheduleOr method and not the MaybeSchedule
+ // method on the serializer.
+ ccb.serializer.ScheduleOr(uccs, onFailure)
return <-errCh
}
// resolverError is invoked by grpc to push a resolver error to the underlying
// balancer. The call to the balancer is executed from the serializer.
func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.serializer.Schedule(func(ctx context.Context) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
@@ -133,7 +145,7 @@ func (ccb *ccBalancerWrapper) close() {
ccb.closed = true
ccb.mu.Unlock()
channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
- ccb.serializer.Schedule(func(context.Context) {
+ ccb.serializer.TrySchedule(func(context.Context) {
if ccb.balancer == nil {
return
}
@@ -145,7 +157,7 @@ func (ccb *ccBalancerWrapper) close() {
// exitIdle invokes the balancer's exitIdle method in the serializer.
func (ccb *ccBalancerWrapper) exitIdle() {
- ccb.serializer.Schedule(func(ctx context.Context) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
@@ -182,7 +194,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
return acbw, nil
}
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) {
// The graceful switch balancer will never call this.
logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
}
@@ -246,21 +258,28 @@ type acBalancerWrapper struct {
ccb *ccBalancerWrapper // read-only
stateListener func(balancer.SubConnState)
- mu sync.Mutex
- producers map[balancer.ProducerBuilder]*refCountedProducer
+ producersMu sync.Mutex
+ producers map[balancer.ProducerBuilder]*refCountedProducer
}
// updateState is invoked by grpc to push a subConn state update to the
// underlying balancer.
-func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
- acbw.ccb.serializer.Schedule(func(ctx context.Context) {
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || acbw.ccb.balancer == nil {
return
}
+ // Invalidate all producers on any state change.
+ acbw.closeProducers()
+
// Even though it is optional for balancers, gracefulswitch ensures
// opts.StateListener is set, so this cannot ever be nil.
// TODO: delete this comment when UpdateSubConnState is removed.
- acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
+ scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
+ if s == connectivity.Ready {
+ setConnectedAddress(&scs, curAddr)
+ }
+ acbw.stateListener(scs)
})
}
@@ -277,6 +296,7 @@ func (acbw *acBalancerWrapper) Connect() {
}
func (acbw *acBalancerWrapper) Shutdown() {
+ acbw.closeProducers()
acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
}
@@ -284,9 +304,10 @@ func (acbw *acBalancerWrapper) Shutdown() {
// ready, blocks until it is or ctx expires. Returns an error when the context
// expires or the addrConn is shut down.
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
- transport, err := acbw.ac.getTransport(ctx)
- if err != nil {
- return nil, err
+ transport := acbw.ac.getReadyTransport()
+ if transport == nil {
+ return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready")
}
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
}
@@ -311,15 +332,15 @@ type refCountedProducer struct {
}
func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
+ acbw.producersMu.Lock()
+ defer acbw.producersMu.Unlock()
// Look up existing producer from this builder.
pData := acbw.producers[pb]
if pData == nil {
// Not found; create a new one and add it to the producers map.
- p, close := pb.Build(acbw)
- pData = &refCountedProducer{producer: p, close: close}
+ p, closeFn := pb.Build(acbw)
+ pData = &refCountedProducer{producer: p, close: closeFn}
acbw.producers[pb] = pData
}
// Account for this new reference.
@@ -329,13 +350,26 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (
// and delete the refCountedProducer from the map if the total reference
// count goes to zero.
unref := func() {
- acbw.mu.Lock()
+ acbw.producersMu.Lock()
+ // If closeProducers has already closed this producer instance, refs is
+ // set to 0, so the check after decrementing will never pass, and the
+ // producer will not be double-closed.
pData.refs--
if pData.refs == 0 {
defer pData.close() // Run outside the acbw mutex
delete(acbw.producers, pb)
}
- acbw.mu.Unlock()
+ acbw.producersMu.Unlock()
}
return pData.producer, grpcsync.OnceFunc(unref)
}
+
+func (acbw *acBalancerWrapper) closeProducers() {
+ acbw.producersMu.Lock()
+ defer acbw.producersMu.Unlock()
+ for pb, pData := range acbw.producers {
+ pData.refs = 0
+ pData.close()
+ delete(acbw.producers, pb)
+ }
+}
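// Illustrative sketch (not part of the patch): callers pair GetOrBuildProducer
// with its returned cleanup; myBuilder is a hypothetical
// balancer.ProducerBuilder implementation.
//
//	p, cleanup := acbw.GetOrBuildProducer(myBuilder{})
//	defer cleanup() // safe even after closeProducers: refs was zeroed there,
//	                // so the refs == 0 check above cannot fire a second close
//	_ = p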
diff --git a/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 63c639e4f..55bffaa77 100644
--- a/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,8 +18,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
package grpc_binarylog_v1
@@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
-var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
(GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
(GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
(Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type
@@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*GrpcLogEntry); i {
case 0:
return &v.state
@@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ClientHeader); i {
case 0:
return &v.state
@@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ServerHeader); i {
case 0:
return &v.state
@@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Trailer); i {
case 0:
return &v.state
@@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Message); i {
case 0:
return &v.state
@@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*Metadata); i {
case 0:
return &v.state
@@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*MetadataEntry); i {
case 0:
return &v.state
@@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*Address); i {
case 0:
return &v.state
@@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
}
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
(*GrpcLogEntry_ClientHeader)(nil),
(*GrpcLogEntry_ServerHeader)(nil),
(*GrpcLogEntry_Message)(nil),
diff --git a/tools/vendor/google.golang.org/grpc/clientconn.go b/tools/vendor/google.golang.org/grpc/clientconn.go
index 423be7b43..19763f8ed 100644
--- a/tools/vendor/google.golang.org/grpc/clientconn.go
+++ b/tools/vendor/google.golang.org/grpc/clientconn.go
@@ -24,6 +24,7 @@ import (
"fmt"
"math"
"net/url"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -39,6 +40,7 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
iresolver "google.golang.org/grpc/internal/resolver"
+ "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+ cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+
cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+
return cc, nil
}
@@ -590,13 +595,14 @@ type ClientConn struct {
cancel context.CancelFunc // Cancelled on close.
// The following are initialized at dial time, and are read-only after that.
- target string // User's dial target.
- parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
- authority string // See initAuthority().
- dopts dialOptions // Default and user specified dial options.
- channelz *channelz.Channel // Channelz object.
- resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
- idlenessMgr *idle.Manager
+ target string // User's dial target.
+ parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
+ authority string // See initAuthority().
+ dopts dialOptions // Default and user specified dial options.
+ channelz *channelz.Channel // Channelz object.
+ resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
+ idlenessMgr *idle.Manager
+ metricsRecorderList *stats.MetricsRecorderList
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
@@ -626,11 +632,6 @@ type ClientConn struct {
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in the former case and false in the latter.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState {
@@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
}
// GetState returns the connectivity.State of ClientConn.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
-// release.
func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState()
}
@@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
cc.csMgr.updateState(connectivity.TransientFailure)
}
-// Makes a copy of the input addresses slice and clears out the balancer
-// attributes field. Addresses are passed during subconn creation and address
-// update operations. In both cases, we will clear the balancer attributes by
-// calling this function, and therefore we will be able to use the Equal method
-// provided by the resolver.Address type for comparison.
-func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
out := make([]resolver.Address, len(in))
- for i := range in {
- out[i] = in[i]
- out[i].BalancerAttributes = nil
- }
+ copy(out, in)
return out
}
@@ -837,12 +827,11 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
ac := &addrConn{
state: connectivity.Idle,
cc: cc,
- addrs: copyAddressesWithoutBalancerAttributes(addrs),
+ addrs: copyAddresses(addrs),
scopts: opts,
dopts: cc.dopts,
channelz: channelz.RegisterSubChannel(cc.channelz, ""),
resetBackoff: make(chan struct{}),
- stateChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Start with our address set to the first address; this may be updated if
@@ -918,28 +907,29 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
- ac.mu.Unlock()
- ac.resetTransport()
+ ac.resetTransportAndUnlock()
return nil
}
-func equalAddresses(a, b []resolver.Address) bool {
- if len(a) != len(b) {
- return false
- }
- for i, v := range a {
- if !v.Equal(b[i]) {
- return false
- }
- }
- return true
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+ return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
}
// updateAddrs updates ac.addrs with the new addresses list and handles active
// connections or connection attempts.
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
- addrs = copyAddressesWithoutBalancerAttributes(addrs)
+ addrs = copyAddresses(addrs)
limit := len(addrs)
if limit > 5 {
limit = 5
@@ -947,7 +937,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
ac.mu.Lock()
- if equalAddresses(ac.addrs, addrs) {
+ if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
ac.mu.Unlock()
return
}
@@ -966,7 +956,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
// Try to find the connected address.
for _, a := range addrs {
a.ServerName = ac.cc.getServerName(a)
- if a.Equal(ac.curAddr) {
+ if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
// We are connected to a valid address, so do nothing but
// update the addresses.
ac.mu.Unlock()
@@ -992,11 +982,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
ac.updateConnectivityState(connectivity.Idle, nil)
}
- ac.mu.Unlock()
-
// Since we were connecting/connected, we should start a new connection
// attempt.
- go ac.resetTransport()
+ go ac.resetTransportAndUnlock()
}
// getServerName determines the serverName to be used in the connection
@@ -1152,10 +1140,15 @@ func (cc *ClientConn) Close() error {
<-cc.resolverWrapper.serializer.Done()
<-cc.balancerWrapper.serializer.Done()
-
+ var wg sync.WaitGroup
for ac := range conns {
- ac.tearDown(ErrClientConnClosing)
+ wg.Add(1)
+ go func(ac *addrConn) {
+ defer wg.Done()
+ ac.tearDown(ErrClientConnClosing)
+ }(ac)
}
+ wg.Wait()
cc.addTraceEvent("deleted")
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
// trace reference to the entity being deleted, and thus prevent it from being
@@ -1190,8 +1183,7 @@ type addrConn struct {
addrs []resolver.Address // All addresses that the resolver resolved to.
// Use updateConnectivityState for updating addrConn's connectivity state.
- state connectivity.State
- stateChan chan struct{} // closed and recreated on every state change.
+ state connectivity.State
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
@@ -1204,9 +1196,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
- // When changing states, reset the state change channel.
- close(ac.stateChan)
- ac.stateChan = make(chan struct{})
ac.state = s
ac.channelz.ChannelMetrics.State.Store(&s)
if lastErr == nil {
@@ -1214,7 +1203,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
} else {
channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
- ac.acbw.updateState(s, lastErr)
+ ac.acbw.updateState(s, ac.curAddr, lastErr)
}
// adjustParams updates parameters used to create transports upon
@@ -1231,8 +1220,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
}
}
-func (ac *addrConn) resetTransport() {
- ac.mu.Lock()
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
acCtx := ac.ctx
if acCtx.Err() != nil {
ac.mu.Unlock()
@@ -1263,6 +1254,8 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+ // TODO: #7534 - Move re-resolution requests into the pick_first LB policy
+ // to ensure one resolution request per pass instead of per subconn failure.
ac.cc.resolveNow(resolver.ResolveNowOptions{})
ac.mu.Lock()
if acCtx.Err() != nil {
@@ -1304,7 +1297,7 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
}
-// tryAllAddrs tries to creates a connection to the addresses, and stop when at
+// tryAllAddrs tries to create a connection to the addresses, stopping at
// the first successful one. It returns an error if no address was successfully
// connected, or updates ac appropriately with the new transport.
func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
@@ -1516,29 +1509,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
return nil
}
-// getTransport waits until the addrconn is ready and returns the transport.
-// If the context expires first, returns an appropriate status. If the
-// addrConn is stopped first, returns an Unavailable status error.
-func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
- for ctx.Err() == nil {
- ac.mu.Lock()
- t, state, sc := ac.transport, ac.state, ac.stateChan
- ac.mu.Unlock()
- if state == connectivity.Ready {
- return t, nil
- }
- if state == connectivity.Shutdown {
- return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
- }
-
- select {
- case <-ctx.Done():
- case <-sc:
- }
- }
- return nil, status.FromContextError(ctx.Err()).Err()
-}
-
// tearDown starts to tear down the addrConn.
//
// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
@@ -1585,7 +1555,7 @@ func (ac *addrConn) tearDown(err error) {
} else {
// Hard close the transport when the channel is entering idle or is
// being shutdown. In the case where the channel is being shutdown,
- // closing of transports is also taken care of by cancelation of cc.ctx.
+ // closing of transports is also taken care of by cancellation of cc.ctx.
// But in the case where the channel is entering idle, we need to
// explicitly close the transports here. Instead of distinguishing
// between these two cases, it is simpler to close the transport
diff --git a/tools/vendor/google.golang.org/grpc/codec.go b/tools/vendor/google.golang.org/grpc/codec.go
index 411e3dfd4..e840858b7 100644
--- a/tools/vendor/google.golang.org/grpc/codec.go
+++ b/tools/vendor/google.golang.org/grpc/codec.go
@@ -21,18 +21,73 @@ package grpc
import (
"google.golang.org/grpc/encoding"
_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+ "google.golang.org/grpc/mem"
)
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the registry in
+// the encoding package and is not part of the core functionality.
type baseCodec interface {
- Marshal(v any) ([]byte, error)
- Unmarshal(data []byte, v any) error
+ Marshal(v any) (mem.BufferSlice, error)
+ Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and,
+// if a Codec is registered there, wraps it with newCodecV1Bridge to turn it
+// into an encoding.CodecV2. Otherwise it returns the result of
+// encoding.GetCodecV2, which is nil if no CodecV2 is registered either.
+func getCodec(name string) encoding.CodecV2 {
+ if codecV1 := encoding.GetCodec(name); codecV1 != nil {
+ return newCodecV1Bridge(codecV1)
+ }
+
+ return encoding.GetCodecV2(name)
+}
+
+func newCodecV0Bridge(c Codec) baseCodec {
+ return codecV0Bridge{codec: c}
+}
+
+func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
+ return codecV1Bridge{
+ codecV0Bridge: codecV0Bridge{codec: c},
+ name: c.Name(),
+ }
+}
+
+var _ baseCodec = codecV0Bridge{}
+
+type codecV0Bridge struct {
+ codec interface {
+ Marshal(v any) ([]byte, error)
+ Unmarshal(data []byte, v any) error
+ }
+}
+
+func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
+ data, err := c.codec.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil
+}
+
+func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
+ return c.codec.Unmarshal(data.Materialize(), v)
}
-var _ baseCodec = Codec(nil)
-var _ baseCodec = encoding.Codec(nil)
+var _ encoding.CodecV2 = codecV1Bridge{}
+
+type codecV1Bridge struct {
+ codecV0Bridge
+ name string
+}
+
+func (c codecV1Bridge) Name() string {
+ return c.name
+}
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
diff --git a/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
index 82bee1443..4c805c644 100644
--- a/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
+++ b/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials {
// NoSecurity.
type insecureTC struct{}
-func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
}
diff --git a/tools/vendor/google.golang.org/grpc/credentials/tls.go b/tools/vendor/google.golang.org/grpc/credentials/tls.go
index 411435854..e163a473d 100644
--- a/tools/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/tools/vendor/google.golang.org/grpc/credentials/tls.go
@@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
- tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
- tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
+ config := applyDefaults(c)
+ if config.GetConfigForClient != nil {
+ oldFn := config.GetConfigForClient
+ config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
+ cfgForClient, err := oldFn(hello)
+ if err != nil || cfgForClient == nil {
+ return cfgForClient, err
+ }
+ return applyDefaults(cfgForClient), nil
+ }
+ }
+ return &tlsCreds{config: config}
+}
+
+func applyDefaults(c *tls.Config) *tls.Config {
+ config := credinternal.CloneTLSConfig(c)
+ config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos)
// If the user did not configure a MinVersion and did not configure a
// MaxVersion < 1.2, use MinVersion=1.2, which is required by
// https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
- if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) {
- tc.config.MinVersion = tls.VersionTLS12
+ if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) {
+ config.MinVersion = tls.VersionTLS12
}
// If the user did not configure CipherSuites, use all "secure" cipher
// suites reported by the TLS package, but remove some explicitly forbidden
// by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
- if tc.config.CipherSuites == nil {
+ if config.CipherSuites == nil {
for _, cs := range tls.CipherSuites() {
if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
- tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID)
+ config.CipherSuites = append(config.CipherSuites, cs.ID)
}
}
}
- return tc
+ return config
}
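// Illustrative sketch (not part of the patch): after this refactor, the
// defaults (h2 in NextProtos, MinVersion, cipher suites) also apply to
// configs returned lazily from GetConfigForClient:
//
//	creds := NewTLS(&tls.Config{
//		GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) {
//			return &tls.Config{}, nil // applyDefaults wraps this result too
//		},
//	})
//	_ = creds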
// NewClientTLSFromCert constructs TLS credentials from the provided root
diff --git a/tools/vendor/google.golang.org/grpc/dialoptions.go b/tools/vendor/google.golang.org/grpc/dialoptions.go
index f5453d48a..518692c3a 100644
--- a/tools/vendor/google.golang.org/grpc/dialoptions.go
+++ b/tools/vendor/google.golang.org/grpc/dialoptions.go
@@ -33,6 +33,7 @@ import (
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
)
@@ -60,7 +61,7 @@ func init() {
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
- internal.WithRecvBufferPool = withRecvBufferPool
+ internal.WithBufferPool = withBufferPool
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -92,7 +93,6 @@ type dialOptions struct {
defaultServiceConfigRawJSON *string
resolvers []resolver.Builder
idleTimeout time.Duration
- recvBufferPool SharedBufferPool
defaultScheme string
maxCallAttempts int
}
@@ -436,7 +436,7 @@ func WithTimeout(d time.Duration) DialOption {
// option to true from the Control field. For a concrete example of how to do
// this, see internal.NetDialerWithTCPKeepalive().
//
-// For more information, please see [issue 23459] in the Go github repo.
+// For more information, please see [issue 23459] in the Go GitHub repo.
//
// [issue 23459]: https://github.com/golang/go/issues/23459
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
@@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption {
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
// for the client transport.
+//
+// Keepalive is disabled by default.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
if kp.Time < internal.KeepaliveMinPingTime {
logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
@@ -677,11 +679,11 @@ func defaultDialOptions() dialOptions {
WriteBufferSize: defaultWriteBufSize,
UseProxy: true,
UserAgent: grpcUA,
+ BufferPool: mem.DefaultBufferPool(),
},
bs: internalbackoff.DefaultExponential,
healthCheckFunc: internal.HealthCheckFunc,
idleTimeout: 30 * time.Minute,
- recvBufferPool: nopBufferPool{},
defaultScheme: "dns",
maxCallAttempts: defaultMaxCallAttempts,
}
@@ -758,25 +760,8 @@ func WithMaxCallAttempts(n int) DialOption {
})
}
-// WithRecvBufferPool returns a DialOption that configures the ClientConn
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
- return withRecvBufferPool(bufferPool)
-}
-
-func withRecvBufferPool(bufferPool SharedBufferPool) DialOption {
+func withBufferPool(bufferPool mem.BufferPool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.recvBufferPool = bufferPool
+ o.copts.BufferPool = bufferPool
})
}
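// Illustrative sketch (not part of the patch): withBufferPool stays
// unexported and is reached via the internal.WithBufferPool hook set in init
// above. A rough sketch of an in-module caller, assuming the hook keeps the
// func(mem.BufferPool) DialOption shape:
//
//	withPool := internal.WithBufferPool.(func(mem.BufferPool) DialOption)
//	opt := withPool(mem.DefaultBufferPool())
//	_ = opt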
diff --git a/tools/vendor/google.golang.org/grpc/doc.go b/tools/vendor/google.golang.org/grpc/doc.go
index 0022859ad..e7b532b6f 100644
--- a/tools/vendor/google.golang.org/grpc/doc.go
+++ b/tools/vendor/google.golang.org/grpc/doc.go
@@ -16,7 +16,7 @@
*
*/
-//go:generate ./regenerate.sh
+//go:generate ./scripts/regenerate.sh
/*
Package grpc implements an RPC system called gRPC.
diff --git a/tools/vendor/google.golang.org/grpc/encoding/encoding.go b/tools/vendor/google.golang.org/grpc/encoding/encoding.go
index 5ebf88d71..11d0ae142 100644
--- a/tools/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/tools/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -94,7 +94,7 @@ type Codec interface {
Name() string
}
-var registeredCodecs = make(map[string]Codec)
+var registeredCodecs = make(map[string]any)
// RegisterCodec registers the provided Codec for use with all gRPC clients and
// servers.
@@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) {
//
// The content-subtype is expected to be lowercase.
func GetCodec(contentSubtype string) Codec {
- return registeredCodecs[contentSubtype]
+ c, _ := registeredCodecs[contentSubtype].(Codec)
+ return c
}
diff --git a/tools/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/tools/vendor/google.golang.org/grpc/encoding/encoding_v2.go
new file mode 100644
index 000000000..074c5e234
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/encoding/encoding_v2.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encoding
+
+import (
+ "strings"
+
+ "google.golang.org/grpc/mem"
+)
+
+// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a CodecV2's
+// methods can be called from concurrent goroutines.
+type CodecV2 interface {
+ // Marshal returns the wire format of v. The buffers in the returned
+ // [mem.BufferSlice] must have at least one reference each, which will be freed
+ // by gRPC when they are no longer needed.
+ Marshal(v any) (out mem.BufferSlice, err error)
+ // Unmarshal parses the wire format into v. Note that data will be freed as soon
+ // as this function returns. If the codec wishes to guarantee access to the data
+ // after this function, it must take its own reference that it frees when it is
+ // no longer needed.
+ Unmarshal(data mem.BufferSlice, v any) error
+ // Name returns the name of the Codec implementation. The returned string
+ // will be used as part of content type in transmission. The result must be
+ // static; the result cannot change between calls.
+ Name() string
+}
+
+// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
+// servers.
+//
+// The CodecV2 will be stored and looked up by the result of its Name() method, which
+// should match the content-subtype of the encoding handled by the CodecV2. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If both a Codec and CodecV2 are registered with the same name, the CodecV2
+// will be used.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodecV2(codec CodecV2) {
+ if codec == nil {
+ panic("cannot register a nil CodecV2")
+ }
+ if codec.Name() == "" {
+ panic("cannot register CodecV2 with empty string result for Name()")
+ }
+ contentSubtype := strings.ToLower(codec.Name())
+ registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodecV2(contentSubtype string) CodecV2 {
+ c, _ := registeredCodecs[contentSubtype].(CodecV2)
+ return c
+}
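Together with the GetCodec change above — the shared registry now stores both Codec and CodecV2 values, so each getter type-asserts its own interface — a custom CodecV2 can be plugged in at init time. A minimal sketch, assuming a hypothetical JSON-based codec (the `rawCodec` type and the "rawjson" name are illustrative, not part of this patch):

```go
package rawjson

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

// rawCodec is a hypothetical CodecV2 that encodes values as JSON.
type rawCodec struct{}

func (rawCodec) Marshal(v any) (mem.BufferSlice, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	// SliceBuffer wraps a plain []byte without pool-backed memory.
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (rawCodec) Unmarshal(data mem.BufferSlice, v any) error {
	// Materialize concatenates the slice's buffers into a single []byte.
	return json.Unmarshal(data.Materialize(), v)
}

func (rawCodec) Name() string { return "rawjson" }

func init() {
	// Registration must happen at init time; the registry is not thread-safe.
	encoding.RegisterCodecV2(rawCodec{})
}
```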
diff --git a/tools/vendor/google.golang.org/grpc/encoding/proto/proto.go b/tools/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 66d5cdf03..ceec319dd 100644
--- a/tools/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/tools/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2018 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
"fmt"
"google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/protoadapt"
)
@@ -32,28 +33,51 @@ import (
const Name = "proto"
func init() {
- encoding.RegisterCodec(codec{})
+ encoding.RegisterCodecV2(&codecV2{})
}
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
+// codecV2 is a CodecV2 implementation with protobuf. It is the default codec
+// for gRPC.
+type codecV2 struct{}
-func (codec) Marshal(v any) ([]byte, error) {
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
vv := messageV2Of(v)
if vv == nil {
- return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+ return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
}
- return proto.Marshal(vv)
+ size := proto.Size(vv)
+ if mem.IsBelowBufferPoolingThreshold(size) {
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ } else {
+ pool := mem.DefaultBufferPool()
+ buf := pool.Get(size)
+ if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+ pool.Put(buf)
+ return nil, err
+ }
+ data = append(data, mem.NewBuffer(buf, pool))
+ }
+
+ return data, nil
}
-func (codec) Unmarshal(data []byte, v any) error {
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
vv := messageV2Of(v)
if vv == nil {
return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
}
- return proto.Unmarshal(data, vv)
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ defer buf.Free()
+ // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+ // really possible without a major overhaul of the proto package, but the
+ // vtprotobuf library may be able to support this.
+ return proto.Unmarshal(buf.ReadOnlyData(), vv)
}
func messageV2Of(v any) proto.Message {
@@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message {
return nil
}
-func (codec) Name() string {
+func (c *codecV2) Name() string {
return Name
}
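For context, a rough sketch of how a caller round-trips a message through the new codec; the use of the health-check request as a sample message is just for illustration, and error handling is deliberately blunt:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/proto" // registers the "proto" CodecV2
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	codec := encoding.GetCodecV2("proto")

	in := &healthpb.HealthCheckRequest{Service: "demo"}
	data, err := codec.Marshal(in) // buffers are pooled above the size threshold
	if err != nil {
		panic(err)
	}
	defer data.Free() // release the references Marshal handed back

	out := new(healthpb.HealthCheckRequest)
	if err := codec.Unmarshal(data, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetService()) // demo
}
```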
diff --git a/tools/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/tools/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 000000000..1d827dd5d
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,269 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "maps"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+)
+
+func init() {
+ internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through the global
+// metrics registry. This is written to at initialization time only, and is
+// read-only after initialization.
+var DefaultMetrics = NewMetrics()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+ // The name of this metric. This name must be unique across the whole binary
+ // (including any per call metrics). See
+ // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+ // for metric naming conventions.
+ Name Metric
+ // The description of this metric.
+ Description string
+ // The unit (e.g. entries, seconds) of this metric.
+ Unit string
+ // The required label keys for this metric. These are intended to be
+ // attached to metrics emitted from a stats handler.
+ Labels []string
+ // The optional label keys for this metric. These are intended to be
+ // attached to metrics emitted from a stats handler if configured.
+ OptionalLabels []string
+ // Whether this metric is on by default.
+ Default bool
+ // The type of metric. This is set by the metric registry, and not intended
+ // to be set by a component registering a metric.
+ Type MetricType
+ // Bounds are the bounds of this metric. This only applies to histogram
+ // metrics. If unset or set with length 0, stats handlers will fall back to
+ // default bounds.
+ Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// The types of metrics supported by this instrument registry.
+const (
+ MetricTypeIntCount MetricType = iota
+ MetricTypeFloatCount
+ MetricTypeIntHisto
+ MetricTypeFloatHisto
+ MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for an int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[Metric]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[Metric]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if MetricDescriptor not present.
+func DescriptorForMetric(metric Metric) *MetricDescriptor {
+ return metricsRegistry[metric]
+}
+
+func registerMetric(name Metric, def bool) {
+ if registeredMetrics[name] {
+ logger.Fatalf("metric %v already registered", name)
+ }
+ registeredMetrics[name] = true
+ if def {
+ DefaultMetrics = DefaultMetrics.Add(name)
+ }
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntGauge
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+ oldDefaultMetrics := DefaultMetrics
+ oldRegisteredMetrics := registeredMetrics
+ oldMetricsRegistry := metricsRegistry
+
+ registeredMetrics = make(map[Metric]bool)
+ metricsRegistry = make(map[Metric]*MetricDescriptor)
+ // Seed the fresh maps from the snapshots so existing registrations remain
+ // visible while the saved originals stay untouched.
+ maps.Copy(registeredMetrics, oldRegisteredMetrics)
+ maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+ return func() {
+ DefaultMetrics = oldDefaultMetrics
+ registeredMetrics = oldRegisteredMetrics
+ metricsRegistry = oldMetricsRegistry
+ }
+}
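A hedged sketch of how a component is expected to use the registry: register a handle at init time, then record through whatever MetricsRecorder the component is handed. The metric name and helper below are hypothetical:

```go
package example

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// pickCount is registered at init time, as the registry requires. The metric
// name here is hypothetical.
var pickCount = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "grpc.example.picks",
	Description: "Number of picks performed.",
	Unit:        "pick",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// recordPick records on whatever MetricsRecorder the component was handed
// (in practice, a stats handler supplied by the channel).
func recordPick(r estats.MetricsRecorder, target string) {
	pickCount.Record(r, 1, target)
}
```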
diff --git a/tools/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/tools/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 000000000..3221f7a63
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from the metric registry.
+type MetricsRecorder interface {
+ // RecordInt64Count records the measurement alongside labels on the int
+ // count associated with the provided handle.
+ RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+ // RecordFloat64Count records the measurement alongside labels on the float
+ // count associated with the provided handle.
+ RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+ // RecordInt64Histo records the measurement alongside labels on the int
+ // histo associated with the provided handle.
+ RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+ // RecordFloat64Histo records the measurement alongside labels on the float
+ // histo associated with the provided handle.
+ RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+ // RecordInt64Gauge records the measurement alongside labels on the int
+ // gauge associated with the provided handle.
+ RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable,
+// however Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+ // metrics are the set of metrics to initialize.
+ metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing the provided metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for _, metric := range metrics {
+ newMetrics[metric] = true
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+ return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *Metrics) Add(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metrics {
+ newMetrics[metric] = true
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *Metrics) Join(metrics *Metrics) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ maps.Copy(newMetrics, m.metrics)
+ maps.Copy(newMetrics, metrics.metrics)
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Remove removes the metrics from the metrics set and returns a new copy with
+// the metrics removed.
+func (m *Metrics) Remove(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metrics {
+ delete(newMetrics, metric)
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
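A short sketch of the copy-on-write behavior, using hypothetical metric names; every operation below leaves its receiver unchanged:

```go
package example

import estats "google.golang.org/grpc/experimental/stats"

func metricsSets() {
	// Hypothetical metric names; each operation returns a fresh copy.
	base := estats.NewMetrics("grpc.a", "grpc.b")

	withC := base.Add("grpc.c")                       // {a, b, c}
	merged := withC.Join(estats.NewMetrics("grpc.d")) // {a, b, c, d}
	trimmed := merged.Remove("grpc.a")                // {b, c, d}

	_ = trimmed.Metrics() // read-only view of the final set
}
```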
diff --git a/tools/vendor/google.golang.org/grpc/grpclog/component.go b/tools/vendor/google.golang.org/grpc/grpclog/component.go
index ac73c9ced..f1ae080dc 100644
--- a/tools/vendor/google.golang.org/grpc/grpclog/component.go
+++ b/tools/vendor/google.golang.org/grpc/grpclog/component.go
@@ -20,8 +20,6 @@ package grpclog
import (
"fmt"
-
- "google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
@@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.InfoDepth(depth+1, args...)
+ InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.WarningDepth(depth+1, args...)
+ WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.ErrorDepth(depth+1, args...)
+ ErrorDepth(depth+1, args...)
}
func (c *componentData) FatalDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.FatalDepth(depth+1, args...)
+ FatalDepth(depth+1, args...)
}
func (c *componentData) Info(args ...any) {
diff --git a/tools/vendor/google.golang.org/grpc/grpclog/grpclog.go b/tools/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 16928c9cb..db320105e 100644
--- a/tools/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/tools/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -18,18 +18,15 @@
// Package grpclog defines logging for grpc.
//
-// All logs in transport and grpclb packages only go to verbose level 2.
-// All logs in other packages in grpc are logged in spite of the verbosity level.
-//
-// In the default logger,
-// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
-// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog // import "google.golang.org/grpc/grpclog"
+// In the default logger, severity level can be set by environment variable
+// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog
import (
"os"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog/internal"
)
func init() {
@@ -38,58 +35,58 @@ func init() {
// V reports whether verbosity level l is at least the requested verbose level.
func V(l int) bool {
- return grpclog.Logger.V(l)
+ return internal.LoggerV2Impl.V(l)
}
// Info logs to the INFO log.
func Info(args ...any) {
- grpclog.Logger.Info(args...)
+ internal.LoggerV2Impl.Info(args...)
}
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
func Infof(format string, args ...any) {
- grpclog.Logger.Infof(format, args...)
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
func Infoln(args ...any) {
- grpclog.Logger.Infoln(args...)
+ internal.LoggerV2Impl.Infoln(args...)
}
// Warning logs to the WARNING log.
func Warning(args ...any) {
- grpclog.Logger.Warning(args...)
+ internal.LoggerV2Impl.Warning(args...)
}
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, args ...any) {
- grpclog.Logger.Warningf(format, args...)
+ internal.LoggerV2Impl.Warningf(format, args...)
}
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
func Warningln(args ...any) {
- grpclog.Logger.Warningln(args...)
+ internal.LoggerV2Impl.Warningln(args...)
}
// Error logs to the ERROR log.
func Error(args ...any) {
- grpclog.Logger.Error(args...)
+ internal.LoggerV2Impl.Error(args...)
}
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, args ...any) {
- grpclog.Logger.Errorf(format, args...)
+ internal.LoggerV2Impl.Errorf(format, args...)
}
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
func Errorln(args ...any) {
- grpclog.Logger.Errorln(args...)
+ internal.LoggerV2Impl.Errorln(args...)
}
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1.
func Fatal(args ...any) {
- grpclog.Logger.Fatal(args...)
+ internal.LoggerV2Impl.Fatal(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -97,15 +94,15 @@ func Fatal(args ...any) {
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...any) {
- grpclog.Logger.Fatalf(format, args...)
+ internal.LoggerV2Impl.Fatalf(format, args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
-// It calle os.Exit()) with exit code 1.
+// It calls os.Exit() with exit code 1.
func Fatalln(args ...any) {
- grpclog.Logger.Fatalln(args...)
+ internal.LoggerV2Impl.Fatalln(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -114,19 +111,76 @@ func Fatalln(args ...any) {
//
// Deprecated: use Info.
func Print(args ...any) {
- grpclog.Logger.Info(args...)
+ internal.LoggerV2Impl.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
//
// Deprecated: use Infof.
func Printf(format string, args ...any) {
- grpclog.Logger.Infof(format, args...)
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
//
// Deprecated: use Infoln.
func Println(args ...any) {
- grpclog.Logger.Infoln(args...)
+ internal.LoggerV2Impl.Infoln(args...)
+}
+
+// InfoDepth logs to the INFO log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InfoDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Infoln(args...)
+ }
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WarningDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Warningln(args...)
+ }
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ErrorDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Errorln(args...)
+ }
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FatalDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Fatalln(args...)
+ }
+ os.Exit(1)
}
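The depth variants exist so logging helpers can attribute output to their caller's call frame rather than to themselves. A minimal sketch, assuming a hypothetical `warnf` helper:

```go
package example

import (
	"fmt"

	"google.golang.org/grpc/grpclog"
)

// warnf is a hypothetical helper; depth 1 attributes the log line to warnf's
// caller rather than to warnf itself. When no DepthLoggerV2 is installed,
// grpclog falls back to the plain Warningln path shown above.
func warnf(format string, args ...any) {
	grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
}
```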
diff --git a/tools/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/tools/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
new file mode 100644
index 000000000..59c03bc14
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the grpclog package.
+package internal
+
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
+
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
diff --git a/tools/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/tools/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 000000000..e524fdd40
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Fatalln(args ...any)
+ Print(args ...any)
+ Printf(format string, args ...any)
+ Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+ Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(int) bool {
+ // Returns true for all verbose levels.
+ return true
+}
diff --git a/tools/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/tools/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
similarity index 52%
rename from tools/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
rename to tools/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
index bfc45102a..07df71e98 100644
--- a/tools/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
+++ b/tools/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2020 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,59 +16,17 @@
*
*/
-// Package grpclog (internal) defines depth logging for grpc.
-package grpclog
+package internal
import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
"os"
)
-// Logger is the logger used for the non-depth log functions.
-var Logger LoggerV2
-
-// DepthLogger is the logger used for the depth log functions.
-var DepthLogger DepthLoggerV2
-
-// InfoDepth logs to the INFO log at the specified depth.
-func InfoDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.InfoDepth(depth, args...)
- } else {
- Logger.Infoln(args...)
- }
-}
-
-// WarningDepth logs to the WARNING log at the specified depth.
-func WarningDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.WarningDepth(depth, args...)
- } else {
- Logger.Warningln(args...)
- }
-}
-
-// ErrorDepth logs to the ERROR log at the specified depth.
-func ErrorDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.ErrorDepth(depth, args...)
- } else {
- Logger.Errorln(args...)
- }
-}
-
-// FatalDepth logs to the FATAL log at the specified depth.
-func FatalDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.FatalDepth(depth, args...)
- } else {
- Logger.Fatalln(args...)
- }
- os.Exit(1)
-}
-
// LoggerV2 does underlying logging work for grpclog.
-// This is a copy of the LoggerV2 defined in the external grpclog package. It
-// is defined here to avoid a circular dependency.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...any)
@@ -107,14 +65,13 @@ type LoggerV2 interface {
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
-// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
-// It is defined here to avoid a circular dependency.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
+ LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...any)
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
@@ -124,3 +81,124 @@ type DepthLoggerV2 interface {
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...any)
}
+
+const (
+ // infoLog indicates Info severity.
+ infoLog int = iota
+ // warningLog indicates Warning severity.
+ warningLog
+ // errorLog indicates Error severity.
+ errorLog
+ // fatalLog indicates Fatal severity.
+ fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+ m []*log.Logger
+ v int
+ jsonFormat bool
+}
+
+func (g *loggerT) output(severity int, s string) {
+ sevStr := severityName[severity]
+ if !g.jsonFormat {
+ g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
+ return
+ }
+ // TODO: we can also include the logging component, but that needs more
+ // (API) changes.
+ b, _ := json.Marshal(map[string]string{
+ "severity": sevStr,
+ "message": s,
+ })
+ g.m[severity].Output(2, string(b))
+}
+
+func (g *loggerT) Info(args ...any) {
+ g.output(infoLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Infoln(args ...any) {
+ g.output(infoLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Infof(format string, args ...any) {
+ g.output(infoLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Warning(args ...any) {
+ g.output(warningLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Warningln(args ...any) {
+ g.output(warningLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Warningf(format string, args ...any) {
+ g.output(warningLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Error(args ...any) {
+ g.output(errorLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Errorln(args ...any) {
+ g.output(errorLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Errorf(format string, args ...any) {
+ g.output(errorLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Fatal(args ...any) {
+ g.output(fatalLog, fmt.Sprint(args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) Fatalln(args ...any) {
+ g.output(fatalLog, fmt.Sprintln(args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) Fatalf(format string, args ...any) {
+ g.output(fatalLog, fmt.Sprintf(format, args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) V(l int) bool {
+ return l <= g.v
+}
+
+// LoggerV2Config configures the LoggerV2 implementation.
+type LoggerV2Config struct {
+ // Verbosity sets the verbosity level of the logger.
+ Verbosity int
+ // FormatJSON controls whether the logger should output logs in JSON format.
+ FormatJSON bool
+}
+
+// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
+// The infoW, warningW, and errorW writers are used to write log messages of
+// different severity levels.
+func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
+ var m []*log.Logger
+ flag := log.LstdFlags
+ if c.FormatJSON {
+ flag = 0
+ }
+ m = append(m, log.New(infoW, "", flag))
+ m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
+ ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
+ m = append(m, log.New(ew, "", flag))
+ m = append(m, log.New(ew, "", flag))
+ return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
+}
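Since this package is internal, only grpc itself can import it; still, a small in-package sketch shows the writer fan-out NewLoggerV2 builds: WARNING lines reach the info and warning writers, ERROR and FATAL lines reach all three (the demoFanOut function is hypothetical):

```go
package internal

import (
	"bytes"
	"fmt"
)

// demoFanOut is a hypothetical in-package check of the writer fan-out.
func demoFanOut() {
	var info, warn, errw bytes.Buffer
	l := NewLoggerV2(&info, &warn, &errw, LoggerV2Config{Verbosity: 2})

	l.Warning("w") // reaches info and warn
	l.Error("e")   // reaches info, warn, and errw

	fmt.Println(info.Len() > 0, warn.Len() > 0, errw.Len() > 0) // true true true
}
```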
diff --git a/tools/vendor/google.golang.org/grpc/grpclog/logger.go b/tools/vendor/google.golang.org/grpc/grpclog/logger.go
index b1674d826..4b2035857 100644
--- a/tools/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/tools/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -18,70 +18,17 @@
package grpclog
-import "google.golang.org/grpc/internal/grpclog"
+import "google.golang.org/grpc/grpclog/internal"
// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
-type Logger interface {
- Fatal(args ...any)
- Fatalf(format string, args ...any)
- Fatalln(args ...any)
- Print(args ...any)
- Printf(format string, args ...any)
- Println(args ...any)
-}
+type Logger internal.Logger
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
- grpclog.Logger = &loggerWrapper{Logger: l}
-}
-
-// loggerWrapper wraps Logger into a LoggerV2.
-type loggerWrapper struct {
- Logger
-}
-
-func (g *loggerWrapper) Info(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Infoln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Infof(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Warning(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Warningln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Warningf(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Error(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Errorln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Errorf(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) V(l int) bool {
- // Returns true for all verbose level.
- return true
+ internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
}
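The standard library's *log.Logger still satisfies this deprecated interface, so the old installation path keeps working; a brief sketch:

```go
package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// The standard library *log.Logger satisfies the deprecated Logger
	// interface; SetLogger wraps it into a LoggerV2 via the internal
	// LoggerWrapper shown above.
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}
```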
diff --git a/tools/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/tools/vendor/google.golang.org/grpc/grpclog/loggerv2.go
index ecfd36d71..892dc13d1 100644
--- a/tools/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ b/tools/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -19,52 +19,16 @@
package grpclog
import (
- "encoding/json"
- "fmt"
"io"
- "log"
"os"
"strconv"
"strings"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog/internal"
)
// LoggerV2 does underlying logging work for grpclog.
-type LoggerV2 interface {
- // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
- Info(args ...any)
- // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
- Infoln(args ...any)
- // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
- Infof(format string, args ...any)
- // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
- Warning(args ...any)
- // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
- Warningln(args ...any)
- // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
- Warningf(format string, args ...any)
- // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- Error(args ...any)
- // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- Errorln(args ...any)
- // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- Errorf(format string, args ...any)
- // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatal(args ...any)
- // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalln(args ...any)
- // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalf(format string, args ...any)
- // V reports whether verbosity level l is at least the requested verbose level.
- V(l int) bool
-}
+type LoggerV2 internal.LoggerV2
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
@@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
- grpclog.Logger = l
- grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
-}
-
-const (
- // infoLog indicates Info severity.
- infoLog int = iota
- // warningLog indicates Warning severity.
- warningLog
- // errorLog indicates Error severity.
- errorLog
- // fatalLog indicates Fatal severity.
- fatalLog
-)
-
-// severityName contains the string representation of each severity.
-var severityName = []string{
- infoLog: "INFO",
- warningLog: "WARNING",
- errorLog: "ERROR",
- fatalLog: "FATAL",
-}
-
-// loggerT is the default logger used by grpclog.
-type loggerT struct {
- m []*log.Logger
- v int
- jsonFormat bool
+ internal.LoggerV2Impl = l
+ internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
@@ -108,32 +46,13 @@ type loggerT struct {
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
-}
-
-type loggerV2Config struct {
- verbose int
- jsonFormat bool
-}
-
-func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
- var m []*log.Logger
- flag := log.LstdFlags
- if c.jsonFormat {
- flag = 0
- }
- m = append(m, log.New(infoW, "", flag))
- m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
- ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
- m = append(m, log.New(ew, "", flag))
- m = append(m, log.New(ew, "", flag))
- return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
@@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 {
jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
- verbose: v,
- jsonFormat: jsonFormat,
- })
-}
-
-func (g *loggerT) output(severity int, s string) {
- sevStr := severityName[severity]
- if !g.jsonFormat {
- g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
- return
- }
- // TODO: we can also include the logging component, but that needs more
- // (API) changes.
- b, _ := json.Marshal(map[string]string{
- "severity": sevStr,
- "message": s,
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
+ Verbosity: v,
+ FormatJSON: jsonFormat,
})
- g.m[severity].Output(2, string(b))
-}
-
-func (g *loggerT) Info(args ...any) {
- g.output(infoLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Infoln(args ...any) {
- g.output(infoLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Infof(format string, args ...any) {
- g.output(infoLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Warning(args ...any) {
- g.output(warningLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Warningln(args ...any) {
- g.output(warningLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Warningf(format string, args ...any) {
- g.output(warningLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Error(args ...any) {
- g.output(errorLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Errorln(args ...any) {
- g.output(errorLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Errorf(format string, args ...any) {
- g.output(errorLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Fatal(args ...any) {
- g.output(fatalLog, fmt.Sprint(args...))
- os.Exit(1)
-}
-
-func (g *loggerT) Fatalln(args ...any) {
- g.output(fatalLog, fmt.Sprintln(args...))
- os.Exit(1)
-}
-
-func (g *loggerT) Fatalf(format string, args ...any) {
- g.output(fatalLog, fmt.Sprintf(format, args...))
- os.Exit(1)
-}
-
-func (g *loggerT) V(l int) bool {
- return l <= g.v
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
@@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool {
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
-type DepthLoggerV2 interface {
- LoggerV2
- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
- InfoDepth(depth int, args ...any)
- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
- WarningDepth(depth int, args ...any)
- // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
- ErrorDepth(depth int, args ...any)
- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
- FatalDepth(depth int, args ...any)
-}
+type DepthLoggerV2 internal.DepthLoggerV2
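Because of the multi-writer fan-out in the internal constructor, severity filtering is done by discarding the lower-severity writers rather than by dropping messages. A hedged sketch of an error-and-above logger:

```go
package main

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Warnings fan out to infoW+warningW and errors to all three writers, so
	// discarding the lower-severity writers yields an error-and-above logger
	// without losing any ERROR or FATAL output.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, os.Stderr))
}
```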
diff --git a/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 38b883507..d92335445 100644
--- a/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
@@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_grpc_health_v1_health_proto_goTypes = []interface{}{
+var file_grpc_health_v1_health_proto_goTypes = []any{
(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
(*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
(*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
@@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckRequest); i {
case 0:
return &v.state
@@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() {
return nil
}
}
- file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckResponse); i {
case 0:
return &v.state
diff --git a/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index 51b736ba0..f96b8ab49 100644
--- a/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.4.0
-// - protoc v4.25.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
@@ -32,8 +32,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.62.0 or later.
-const _ = grpc.SupportPackageIsVersion8
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
const (
Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
@@ -73,7 +73,7 @@ type HealthClient interface {
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
- Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
+ Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error)
}
type healthClient struct {
@@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
return out, nil
}
-func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
+func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
- x := &healthWatchClient{ClientStream: stream}
+ x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts .
return x, nil
}
-type Health_WatchClient interface {
- Recv() (*HealthCheckResponse, error)
- grpc.ClientStream
-}
-
-type healthWatchClient struct {
- grpc.ClientStream
-}
-
-func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
- m := new(HealthCheckResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse]
// HealthServer is the server API for Health service.
// All implementations should embed UnimplementedHealthServer
-// for forward compatibility
+// for forward compatibility.
//
// Health is gRPC's mechanism for checking whether a server is able to handle
// RPCs. Its semantics are documented in
@@ -160,19 +146,23 @@ type HealthServer interface {
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
- Watch(*HealthCheckRequest, Health_WatchServer) error
+ Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error
}
-// UnimplementedHealthServer should be embedded to have forward compatible implementations.
-type UnimplementedHealthServer struct {
-}
+// UnimplementedHealthServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedHealthServer struct{}
func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
}
-func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error {
+func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
+func (UnimplementedHealthServer) testEmbeddedByValue() {}
// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HealthServer will
@@ -182,6 +172,13 @@ type UnsafeHealthServer interface {
}
func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
+ // If the following call panics, it indicates UnimplementedHealthServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&Health_ServiceDesc, srv)
}
@@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
if err := stream.RecvMsg(m); err != nil {
return err
}
- return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream})
-}
-
-type Health_WatchServer interface {
- Send(*HealthCheckResponse) error
- grpc.ServerStream
+ return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream})
}
-type healthWatchServer struct {
- grpc.ServerStream
-}
-
-func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
- return x.ServerStream.SendMsg(m)
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse]
// Health_ServiceDesc is the grpc.ServiceDesc for Health service.
// It's only intended for direct use with grpc.RegisterService,
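Against the regenerated API, a service implementation now works directly with the generic stream types; a minimal hypothetical sketch (the demoHealth type is illustrative):

```go
package main

import (
	"context"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// demoHealth embeds UnimplementedHealthServer by value, per the note above,
// so the registration-time embedding check passes.
type demoHealth struct {
	healthpb.UnimplementedHealthServer
}

func (demoHealth) Check(context.Context, *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}

// Watch now takes the generic ServerStreamingServer directly; the old
// Health_WatchServer name still works as an alias.
func (demoHealth) Watch(_ *healthpb.HealthCheckRequest, stream grpc.ServerStreamingServer[healthpb.HealthCheckResponse]) error {
	return stream.Send(&healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING})
}

func register(s *grpc.Server) {
	healthpb.RegisterHealthServer(s, demoHealth{})
}
```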
diff --git a/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
index 13821a926..85540f86a 100644
--- a/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
+++ b/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
@@ -33,6 +33,8 @@ type lbConfig struct {
childConfig serviceconfig.LoadBalancingConfig
}
+// ChildName returns the name of the child balancer of the gracefulswitch
+// Balancer.
func ChildName(l serviceconfig.LoadBalancingConfig) string {
return l.(*lbConfig).childBuilder.Name()
}
diff --git a/tools/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/tools/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index aa4505a87..966932891 100644
--- a/tools/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/tools/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry
}
// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
+func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}
diff --git a/tools/vendor/google.golang.org/grpc/internal/channelz/channel.go b/tools/vendor/google.golang.org/grpc/internal/channelz/channel.go
index d7e9e1d54..3ec662799 100644
--- a/tools/vendor/google.golang.org/grpc/internal/channelz/channel.go
+++ b/tools/vendor/google.golang.org/grpc/internal/channelz/channel.go
@@ -43,6 +43,8 @@ type Channel struct {
// Non-zero traceRefCount means the trace of this channel cannot be deleted.
traceRefCount int32
+ // ChannelMetrics holds connectivity state, target and call metrics for the
+ // channel within channelz.
ChannelMetrics ChannelMetrics
}
@@ -50,6 +52,8 @@ type Channel struct {
// nesting.
func (c *Channel) channelzIdentifier() {}
+// String returns a string representation of the Channel, including its parent
+// entity and ID.
func (c *Channel) String() string {
if c.Parent == nil {
return fmt.Sprintf("Channel #%d", c.ID)
@@ -61,24 +65,31 @@ func (c *Channel) id() int64 {
return c.ID
}
+// SubChans returns a copy of the map of sub-channels associated with the
+// Channel.
func (c *Channel) SubChans() map[int64]string {
db.mu.RLock()
defer db.mu.RUnlock()
return copyMap(c.subChans)
}
+// NestedChans returns a copy of the map of nested channels associated with the
+// Channel.
func (c *Channel) NestedChans() map[int64]string {
db.mu.RLock()
defer db.mu.RUnlock()
return copyMap(c.nestedChans)
}
+// Trace returns a copy of the Channel's trace data.
func (c *Channel) Trace() *ChannelTrace {
db.mu.RLock()
defer db.mu.RUnlock()
return c.trace.copy()
}
+// ChannelMetrics holds connectivity state, target and call metrics for the
+// channel within channelz.
type ChannelMetrics struct {
// The current connectivity state of the channel.
State atomic.Pointer[connectivity.State]
@@ -136,12 +147,16 @@ func strFromPointer(s *string) string {
return *s
}
+// String returns a string representation of the ChannelMetrics, including its
+// state, target, and call metrics.
func (c *ChannelMetrics) String() string {
return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
)
}
+// NewChannelMetricForTesting creates a new instance of ChannelMetrics with
+// specified initial values for testing purposes.
func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
c := &ChannelMetrics{}
c.State.Store(&state)
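As context for the hunk above: every ChannelMetrics field is an atomic, so readers never take the channelz lock. A minimal sketch of exercising it, assuming the call-count fields are atomic.Int64 as the Load calls in String suggest (internal/channelz is not importable outside the grpc module, so this is illustrative only):

    m := channelz.NewChannelMetricForTesting(connectivity.Ready, "dns:///example", 3, 2, 1, 0)
    m.CallsStarted.Add(1)   // lock-free update
    fmt.Println(m.String()) // State: READY, Target: dns:///example, CallsStarted: 4, ...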
diff --git a/tools/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/tools/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
index dfe18b089..64c791953 100644
--- a/tools/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
+++ b/tools/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -46,7 +46,7 @@ type entry interface {
// channelMap is the storage data structure for channelz.
//
-// Methods of channelMap can be divided in two two categories with respect to
+// Methods of channelMap can be divided into two categories with respect to
// locking.
//
// 1. Methods acquire the global lock.
@@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string {
return n
}
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
if maxResults <= 0 {
maxResults = EntriesPerPage
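The min helper deleted above (and a second copy removed later in controlbuf.go) is obsolete because Go 1.21 made min and max predeclared builtins, so existing call sites compile unchanged:

    // With a Go 1.21+ toolchain, no local helper is needed:
    pageSize := min(maxResults, EntriesPerPage) // builtin min over ordered types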
diff --git a/tools/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/tools/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index 03e24e150..078bb8123 100644
--- a/tools/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/tools/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -33,7 +33,7 @@ var (
// outside this package except by tests.
IDGen IDGenerator
- db *channelMap = newChannelMap()
+ db = newChannelMap()
// EntriesPerPage defines the number of channelz entries to be shown on a web page.
EntriesPerPage = 50
curState int32
diff --git a/tools/vendor/google.golang.org/grpc/internal/channelz/server.go b/tools/vendor/google.golang.org/grpc/internal/channelz/server.go
index cdfc49d6e..b5a824992 100644
--- a/tools/vendor/google.golang.org/grpc/internal/channelz/server.go
+++ b/tools/vendor/google.golang.org/grpc/internal/channelz/server.go
@@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se
return sm
}
+// CopyFrom copies the metrics data from the provided ServerMetrics
+// instance into the current instance.
func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
sm.CallsStarted.Store(o.CallsStarted.Load())
sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
diff --git a/tools/vendor/google.golang.org/grpc/internal/channelz/socket.go b/tools/vendor/google.golang.org/grpc/internal/channelz/socket.go
index fa64834b2..90103847c 100644
--- a/tools/vendor/google.golang.org/grpc/internal/channelz/socket.go
+++ b/tools/vendor/google.golang.org/grpc/internal/channelz/socket.go
@@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct {
RemoteFlowControlWindow int64
}
+// SocketType represents the type of socket.
type SocketType string
+// SocketType can be one of these.
const (
SocketTypeNormal = "NormalSocket"
SocketTypeListen = "ListenSocket"
)
+// Socket represents a socket within channelz which includes socket
+// metrics and data related to socket activity and provides methods
+// for managing and interacting with sockets.
type Socket struct {
Entity
SocketType SocketType
@@ -100,6 +105,8 @@ type Socket struct {
Security credentials.ChannelzSecurityValue
}
+// String returns a string representation of the Socket, including its parent
+// entity, socket type, and ID.
func (ls *Socket) String() string {
return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
}
diff --git a/tools/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/tools/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
index 3b88e4cba..b20802e6e 100644
--- a/tools/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
+++ b/tools/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
@@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 {
return sc.ID
}
+// Sockets returns a copy of the sockets map associated with the SubChannel.
func (sc *SubChannel) Sockets() map[int64]string {
db.mu.RLock()
defer db.mu.RUnlock()
return copyMap(sc.sockets)
}
+// Trace returns a copy of the ChannelTrace associated with the SubChannel.
func (sc *SubChannel) Trace() *ChannelTrace {
db.mu.RLock()
defer db.mu.RUnlock()
diff --git a/tools/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/tools/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
index d1ed8df6a..0e6e18e18 100644
--- a/tools/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
+++ b/tools/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -35,13 +35,13 @@ type SocketOptionData struct {
// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control().
// Windows OS doesn't support Socket Option
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
+func (s *SocketOptionData) Getsockopt(uintptr) {
once.Do(func() {
logger.Warning("Channelz: socket options are not supported on non-linux environments")
})
}
// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c any) *SocketOptionData {
+func GetSocketOption(any) *SocketOptionData {
return nil
}
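This hunk, like the method_logger.go change above and the passthrough.go and syscall_nonlinux.go changes below, drops the names of unused parameters. A self-contained sketch of the idiom, with hypothetical types:

    type handler interface {
    	Handle(ctx context.Context, req string)
    }

    type noopHandler struct{}

    // Unnamed parameters still satisfy the interface signature; they simply
    // make the non-use explicit and silence unused-parameter linters.
    func (noopHandler) Handle(context.Context, string) {}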
diff --git a/tools/vendor/google.golang.org/grpc/internal/channelz/trace.go b/tools/vendor/google.golang.org/grpc/internal/channelz/trace.go
index 36b867403..2bffe4777 100644
--- a/tools/vendor/google.golang.org/grpc/internal/channelz/trace.go
+++ b/tools/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -79,13 +79,21 @@ type TraceEvent struct {
Parent *TraceEvent
}
+// ChannelTrace provides tracing information for a channel.
+// It tracks various events and metadata related to the channel's lifecycle
+// and operations.
type ChannelTrace struct {
- cm *channelMap
- clearCalled bool
+ cm *channelMap
+ clearCalled bool
+ // The time when the trace was created.
CreationTime time.Time
- EventNum int64
- mu sync.Mutex
- Events []*traceEvent
+ // A counter for the number of events recorded in the
+ // trace.
+ EventNum int64
+ mu sync.Mutex
+ // A slice of traceEvent pointers representing the events recorded for
+ // this channel.
+ Events []*traceEvent
}
func (c *ChannelTrace) copy() *ChannelTrace {
@@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{
RefNormalSocket: "NormalSocket",
}
+// String returns a string representation of the RefChannelType
func (r RefChannelType) String() string {
return refChannelTypeToString[r]
}
diff --git a/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index d90648713..6e7dd6b77 100644
--- a/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -45,7 +45,16 @@ var (
// option is present for backward compatibility. This option may be overridden
// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
// or "false".
- EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false)
+ EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+ // XDSFallbackSupport is the env variable that controls whether support for
+ // xDS fallback is turned on. If this is unset or is false, only the first
+ // xDS server in the list of server configs will be used.
+ XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
+ // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
+ // instead of the existing pickfirst implementation. This can be enabled by
+ // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
+ // to "true".
+ NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
)
func boolFromEnv(envVar string, def bool) bool {
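The body of boolFromEnv lies outside this hunk, but the flipped EnforceALPNEnabled default above only behaves sensibly with asymmetric parsing: a default-true flag should stay on unless explicitly disabled. A sketch consistent with that behavior (an assumption, not copied from the source; uses os and strings):

    func boolFromEnv(envVar string, def bool) bool {
    	if def {
    		// Default-true flags remain true unless explicitly set to "false".
    		return !strings.EqualFold(os.Getenv(envVar), "false")
    	}
    	// Default-false flags become true only when explicitly set to "true".
    	return strings.EqualFold(os.Getenv(envVar), "true")
    }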
diff --git a/tools/vendor/google.golang.org/grpc/internal/experimental.go b/tools/vendor/google.golang.org/grpc/internal/experimental.go
index 7f7044e17..7617be215 100644
--- a/tools/vendor/google.golang.org/grpc/internal/experimental.go
+++ b/tools/vendor/google.golang.org/grpc/internal/experimental.go
@@ -18,11 +18,11 @@
package internal
var (
- // WithRecvBufferPool is implemented by the grpc package and returns a dial
+ // WithBufferPool is implemented by the grpc package and returns a dial
// option to configure a shared buffer pool for a grpc.ClientConn.
- WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+ WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
- // RecvBufferPool is implemented by the grpc package and returns a server
+ // BufferPool is implemented by the grpc package and returns a server
// option to configure a shared buffer pool for a grpc.Server.
- RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+ BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
)
diff --git a/tools/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/tools/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
similarity index 63%
rename from tools/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
rename to tools/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
index faa998de7..092ad187a 100644
--- a/tools/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
+++ b/tools/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
@@ -16,17 +16,21 @@
*
*/
+// Package grpclog provides logging functionality for internal gRPC packages,
+// outside of the functionality provided by the external `grpclog` package.
package grpclog
import (
"fmt"
+
+ "google.golang.org/grpc/grpclog"
)
// PrefixLogger does logging with a prefix.
//
// Logging method on a nil logs without any prefix.
type PrefixLogger struct {
- logger DepthLoggerV2
+ logger grpclog.DepthLoggerV2
prefix string
}
@@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) {
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
- InfoDepth(1, fmt.Sprintf(format, args...))
+ grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
@@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) {
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
return
}
- WarningDepth(1, fmt.Sprintf(format, args...))
+ grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
@@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) {
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
return
}
- ErrorDepth(1, fmt.Sprintf(format, args...))
-}
-
-// Debugf does info logging at verbose level 2.
-func (pl *PrefixLogger) Debugf(format string, args ...any) {
- // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
- // rewrite PrefixLogger a little to ensure that we don't use the global
- // `Logger` here, and instead use the `logger` field.
- if !Logger.V(2) {
- return
- }
- if pl != nil {
- // Handle nil, so the tests can pass in a nil logger.
- format = pl.prefix + format
- pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
- return
- }
- InfoDepth(1, fmt.Sprintf(format, args...))
-
+ grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
}
// V reports whether verbosity level l is at least the requested verbose level.
func (pl *PrefixLogger) V(l int) bool {
- // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
- // rewrite PrefixLogger a little to ensure that we don't use the global
- // `Logger` here, and instead use the `logger` field.
- return Logger.V(l)
+ if pl != nil {
+ return pl.logger.V(l)
+ }
+ return true
}
// NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
return &PrefixLogger{logger: logger, prefix: prefix}
}
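A hedged usage sketch of the reworked logger: PrefixLogger now delegates verbosity checks to the wrapped grpclog.DepthLoggerV2 instead of the package-global Logger. Assuming grpclog.Component returns a DepthLoggerV2, as it does elsewhere in the module:

    pl := NewPrefixLogger(grpclog.Component("transport"), "[transport #7] ")
    pl.Infof("stream %d created", 42) // prints: [transport #7] stream 42 created
    if pl.V(2) {                      // consults the wrapped logger, not a global
    	pl.Infof("verbose details")
    }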
diff --git a/tools/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/tools/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index f7f40a16a..8e8e86128 100644
--- a/tools/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/tools/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
return cs
}
-// Schedule adds a callback to be scheduled after existing callbacks are run.
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
//
// Callbacks are expected to honor the context when performing any blocking
// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+ cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
//
-// Return value indicates if the callback was successfully added to the list of
-// callbacks to be executed by the serializer. It is not possible to add
-// callbacks once the context passed to NewCallbackSerializer is cancelled.
-func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
- return cs.callbacks.Put(f) == nil
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+ if cs.callbacks.Put(f) != nil {
+ onFailure()
+ }
}
func (cs *CallbackSerializer) run(ctx context.Context) {
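The split of Schedule above changes the failure contract: TrySchedule is fire-and-forget, while ScheduleOr guarantees that either the callback or onFailure runs. A short usage sketch under those semantics:

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    cs := NewCallbackSerializer(ctx)

    // Best effort: silently dropped if ctx was canceled before this call.
    cs.TrySchedule(func(ctx context.Context) { /* ordered work */ })

    // Guaranteed hand-off: onFailure runs inline when scheduling fails, so
    // resources owned by the callback can always be released.
    cs.ScheduleOr(
    	func(ctx context.Context) { /* ordered work */ },
    	func() { /* cleanup: serializer already closed */ },
    )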
diff --git a/tools/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/tools/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
index aef8cec1a..6d8c2f518 100644
--- a/tools/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
+++ b/tools/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
if ps.msg != nil {
msg := ps.msg
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[sub] {
@@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) {
ps.msg = msg
for sub := range ps.subscribers {
s := sub
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[s] {
diff --git a/tools/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/tools/vendor/google.golang.org/grpc/internal/grpcutil/method.go
index ec62b4775..683d1955c 100644
--- a/tools/vendor/google.golang.org/grpc/internal/grpcutil/method.go
+++ b/tools/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) {
}
// baseContentType is the base content-type for gRPC. This is a valid
-// content-type on it's own, but can also include a content-subtype such as
+// content-type on its own, but can also include a content-subtype such as
// "proto" as a suffix after "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
// for more details.
diff --git a/tools/vendor/google.golang.org/grpc/internal/idle/idle.go b/tools/vendor/google.golang.org/grpc/internal/idle/idle.go
index fe49cb74c..2c13ee9da 100644
--- a/tools/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ b/tools/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool {
return true
}
+// EnterIdleModeForTesting instructs the channel to enter idle mode.
func (m *Manager) EnterIdleModeForTesting() {
m.tryEnterIdleMode()
}
@@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error {
// came in and OnCallBegin() noticed that the calls count is negative.
// - Channel is in idle mode, and multiple new RPCs come in at the same
// time, all of them notice a negative calls count in OnCallBegin and get
- // here. The first one to get the lock would got the channel to exit idle.
+ // here. The first one to get the lock would get the channel to exit idle.
// - Channel is not in idle mode, and the user calls Connect which calls
// m.ExitIdleMode.
//
@@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool {
return atomic.LoadInt32(&m.closed) == 1
}
+// Close stops the timer associated with the Manager, if it exists.
func (m *Manager) Close() {
atomic.StoreInt32(&m.closed, 1)
diff --git a/tools/vendor/google.golang.org/grpc/internal/internal.go b/tools/vendor/google.golang.org/grpc/internal/internal.go
index 5d6653986..20b4dc3d3 100644
--- a/tools/vendor/google.golang.org/grpc/internal/internal.go
+++ b/tools/vendor/google.golang.org/grpc/internal/internal.go
@@ -183,7 +183,7 @@ var (
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
// metadata to RPCs.
- GRPCResolverSchemeExtraMetadata string = "xds"
+ GRPCResolverSchemeExtraMetadata = "xds"
// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
EnterIdleModeForTesting any // func(*grpc.ClientConn)
@@ -191,6 +191,8 @@ var (
// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
ExitIdleModeForTesting any // func(*grpc.ClientConn) error
+ // ChannelzTurnOffForTesting disables the Channelz service for testing
+ // purposes.
ChannelzTurnOffForTesting func()
// TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
@@ -203,11 +205,27 @@ var (
// UserSetDefaultScheme is set to true if the user has overridden the
// default resolver scheme.
- UserSetDefaultScheme bool = false
+ UserSetDefaultScheme = false
- // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
- // is the number of elements. swap swaps the elements with indexes i and j.
- ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
+ // ConnectedAddress returns the connected address for a SubConnState. The
+ // address is only valid if the state is READY.
+ ConnectedAddress any // func (scs SubConnState) resolver.Address
+
+ // SetConnectedAddress sets the connected address for a SubConnState.
+ SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
+
+ // SnapshotMetricRegistryForTesting snapshots the global data of the metric
+ // registry. Returns a cleanup function that sets the metric registry to its
+ // original state. Only called in testing functions.
+ SnapshotMetricRegistryForTesting func() func()
+
+ // SetDefaultBufferPoolForTesting updates the default buffer pool, for
+ // testing purposes.
+ SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
+
+ // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
+ // testing purposes.
+ SetBufferPoolingThresholdForTesting any // func(int)
)
// HealthChecker defines the signature of the client-side LB channel health
@@ -215,7 +233,7 @@ var (
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
-// it's health back by calling setConnectivityState().
+// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
diff --git a/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index 4552db16b..374c12fb7 100644
--- a/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -177,7 +177,7 @@ type dnsResolver struct {
// finished. Otherwise, data race will be possible. [Race Example] in
// dns_resolver_test we replace the real lookup functions with mocked ones to
// facilitate testing. If Close() doesn't wait for watcher() goroutine
- // finishes, race detector sometimes will warns lookup (READ the lookup
+ // finishes, the race detector sometimes will warn that lookup (READ the lookup
// function pointers) inside watcher() goroutine has data race with
// replaceNetFunc (WRITE the lookup function pointers).
wg sync.WaitGroup
@@ -237,7 +237,9 @@ func (d *dnsResolver) watcher() {
}
func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) {
- if !EnableSRVLookups {
+ // Skip this particular host to avoid timeouts with some versions of
+ // systemd-resolved.
+ if !EnableSRVLookups || d.host == "metadata.google.internal." {
return nil, nil
}
var newAddrs []resolver.Address
diff --git a/tools/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/tools/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index afac56572..b901c7bac 100644
--- a/tools/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/tools/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -55,7 +55,7 @@ func (r *passthroughResolver) start() {
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*passthroughResolver) Close() {}
diff --git a/tools/vendor/google.golang.org/grpc/internal/stats/labels.go b/tools/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 000000000..fd33af51a
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+ // TelemetryLabels are the telemetry labels to record.
+ TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there is none.
+func GetLabels(ctx context.Context) *Labels {
+ labels, _ := ctx.Value(labelsKey{}).(*Labels)
+ return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+ // could also append
+ return context.WithValue(ctx, labelsKey{}, labels)
+}
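The new labels API is a thin wrapper around context.WithValue; a round-trip sketch, written as if inside package stats (the label key is illustrative, not taken from this diff):

    ctx := SetLabels(context.Background(), &Labels{
    	TelemetryLabels: map[string]string{"grpc.lb.locality": "us-east1-a"},
    })
    if l := GetLabels(ctx); l != nil {
    	fmt.Println(l.TelemetryLabels["grpc.lb.locality"]) // us-east1-a
    }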
diff --git a/tools/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/tools/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 000000000..79044657b
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+ "fmt"
+
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It panics (via verifyLabels) if the number of label values provided in a
+// Record call does not match the number of label keys in the metric's
+// descriptor (required plus optional).
+type MetricsRecorderList struct {
+ // metricsRecorders are the metrics recorders this list will forward to.
+ metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metrics recorder list containing all of
+// the provided stats handlers that implement the MetricsRecorder interface.
+// If none of the provided stats handlers implement the MetricsRecorder
+// interface, the returned MetricsRecorderList is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+ var mrs []estats.MetricsRecorder
+ for _, sh := range shs {
+ if mr, ok := sh.(estats.MetricsRecorder); ok {
+ mrs = append(mrs, mr)
+ }
+ }
+ return &MetricsRecorderList{
+ metricsRecorders: mrs,
+ }
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+ if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+ panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+ }
+}
+
+// RecordInt64Count records the measurement alongside labels on the int
+// count associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Count(handle, incr, labels...)
+ }
+}
+
+// RecordFloat64Count records the measurement alongside labels on the float
+// count associated with the provided handle.
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Count(handle, incr, labels...)
+ }
+}
+
+// RecordInt64Histo records the measurement alongside labels on the int
+// histo associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Histo(handle, incr, labels...)
+ }
+}
+
+// RecordFloat64Histo records the measurement alongside labels on the float
+// histo associated with the provided handle.
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+ }
+}
+
+// RecordInt64Gauge records the measurement alongside labels on the int
+// gauge associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+ }
+}
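A hedged sketch of driving the list (the handle and handler slice are hypothetical; the one hard rule, enforced by verifyLabels, is that the variadic label values must match the descriptor's required plus optional label count):

    l := NewMetricsRecorderList(statsHandlers) // statsHandlers: []stats.Handler
    // Passes exactly len(Labels)+len(OptionalLabels) values, else this panics.
    l.RecordInt64Count(rpcStartedHandle, 1, "grpc.method.value", "locality.value")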
diff --git a/tools/vendor/google.golang.org/grpc/internal/status/status.go b/tools/vendor/google.golang.org/grpc/internal/status/status.go
index c7dbc8205..1186f1e9a 100644
--- a/tools/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/tools/vendor/google.golang.org/grpc/internal/status/status.go
@@ -138,17 +138,19 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
// s.Code() != OK implies that s.Proto() != nil.
p := s.Proto()
for _, detail := range details {
- any, err := anypb.New(protoadapt.MessageV2Of(detail))
+ m, err := anypb.New(protoadapt.MessageV2Of(detail))
if err != nil {
return nil, err
}
- p.Details = append(p.Details, any)
+ p.Details = append(p.Details, m)
}
return &Status{s: p}, nil
}
// Details returns a slice of details messages attached to the status.
// If a detail cannot be decoded, the error is returned in place of the detail.
+// If the detail can be decoded, the proto message returned is of the same
+// type that was given to WithDetails().
func (s *Status) Details() []any {
if s == nil || s.s == nil {
return nil
@@ -160,7 +162,38 @@ func (s *Status) Details() []any {
details = append(details, err)
continue
}
- details = append(details, detail)
+ // The call to MessageV1Of is required to unwrap the proto message if
+ // it implemented only the MessageV1 API. The proto message would have
+ // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
+ // added to a global registry used by any.UnmarshalNew().
+ // MessageV1Of has the following behaviour:
+ // 1. If the given message is a wrapped MessageV1, it returns the
+ // unwrapped value.
+ // 2. If the given message already implements MessageV1, it returns it
+ // as is.
+ // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
+ //
+ // Since the Status.WithDetails() API only accepts MessageV1, calling
+ // MessageV1Of ensures we return the same type that was given to
+ // WithDetails:
+ // * If the given type implemented only MessageV1, the unwrapping from
+ // point 1 above will restore the type.
+ // * If the given type implemented both MessageV1 and MessageV2, point 2
+ // above will ensure no wrapping is performed.
+ // * If the given type implemented only MessageV2 and was wrapped using
+ // MessageV1Of before passing to WithDetails(), it would be unwrapped
+ // in WithDetails by calling MessageV2Of(). Point 3 above will ensure
+ // that the type is wrapped in a MessageV1 wrapper again before
+ // returning. Note that protoc-gen-go doesn't generate code which
+ // implements ONLY MessageV2 at the time of writing.
+ //
+ // NOTE: Status details can also be added using the FromProto method.
+ // This could theoretically allow passing a Detail message that only
+ // implements the V2 API. In such a case the message will be wrapped in
+ // a MessageV1 wrapper when fetched using Details().
+ // Since protoc-gen-go generates only code that implements both V1 and
+ // V2 APIs for backward compatibility, this is not a concern.
+ details = append(details, protoadapt.MessageV1Of(detail))
}
return details
}
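The long comment above backs a user-visible guarantee: Details returns the same concrete type that was passed to WithDetails. A sketch against the public status package, assuming errdetails from google.golang.org/genproto/googleapis/rpc/errdetails:

    st := status.New(codes.InvalidArgument, "bad request")
    st2, err := st.WithDetails(&errdetails.ErrorInfo{Reason: "QUOTA_EXCEEDED"})
    if err != nil {
    	// handle the marshaling failure
    }
    for _, d := range st2.Details() {
    	if info, ok := d.(*errdetails.ErrorInfo); ok { // concrete type round-trips
    		fmt.Println(info.GetReason())
    	}
    }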
diff --git a/tools/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/tools/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index 999f52cd7..54c24c2ff 100644
--- a/tools/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/tools/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -58,20 +58,20 @@ func GetRusage() *Rusage {
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs. It is a no-op function for non-linux environments.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
log()
return 0, 0
}
// SetTCPUserTimeout is a no-op function under non-linux environments.
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
log()
return nil
}
// GetTCPUserTimeout is a no-op function under non-linux environments.
// A negative return value indicates the operation is not supported
-func GetTCPUserTimeout(conn net.Conn) (int, error) {
+func GetTCPUserTimeout(net.Conn) (int, error) {
log()
return -1, nil
}
diff --git a/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
index 078137b7f..7e7aaa546 100644
--- a/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
+++ b/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
+ // the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
diff --git a/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
index fd7d43a89..d5c1085ee 100644
--- a/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
+++ b/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
+ // the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 3deadfb4a..ef72fbb3a 100644
--- a/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -32,6 +32,7 @@ import (
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@@ -148,9 +149,9 @@ type dataFrame struct {
streamID uint32
endStream bool
h []byte
- d []byte
+ reader mem.Reader
// onEachWrite is called every time
- // a part of d is written out.
+ // a part of data is written out.
onEachWrite func()
}
@@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream {
}
// controlBuffer is a way to pass information to loopy.
-// Information is passed as specific struct types called control frames.
-// A control frame not only represents data, messages or headers to be sent out
-// but can also be used to instruct loopy to update its internal state.
-// It shouldn't be confused with an HTTP2 frame, although some of the control frames
-// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
+//
+// Information is passed as specific struct types called control frames. A
+// control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state. It
+// shouldn't be confused with an HTTP2 frame, although some of the control
+// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
type controlBuffer struct {
- ch chan struct{}
- done <-chan struct{}
+ wakeupCh chan struct{} // Unblocks readers waiting for something to read.
+ done <-chan struct{} // Closed when the transport is done.
+
+ // Mutex guards all the fields below, except trfChan which can be read
+ // atomically without holding mu.
mu sync.Mutex
- consumerWaiting bool
- list *itemList
- err error
+ consumerWaiting bool // True when readers are blocked waiting for new data.
+ closed bool // True when the controlbuf is finished.
+ list *itemList // List of queued control frames.
// transportResponseFrames counts the number of queued items that represent
// the response of an action initiated by the peer. trfChan is created
@@ -308,47 +313,59 @@ type controlBuffer struct {
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
- trfChan atomic.Value // chan struct{}
+ trfChan atomic.Pointer[chan struct{}]
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
return &controlBuffer{
- ch: make(chan struct{}, 1),
- list: &itemList{},
- done: done,
+ wakeupCh: make(chan struct{}, 1),
+ list: &itemList{},
+ done: done,
}
}
-// throttle blocks if there are too many incomingSettings/cleanupStreams in the
-// controlbuf.
+// throttle blocks if there are too many frames in the control buf that
+// represent the response of an action initiated by the peer, like
+// incomingSettings, cleanupStreams, etc.
func (c *controlBuffer) throttle() {
- ch, _ := c.trfChan.Load().(chan struct{})
- if ch != nil {
+ if ch := c.trfChan.Load(); ch != nil {
select {
- case <-ch:
+ case <-(*ch):
case <-c.done:
}
}
}
+// put adds an item to the controlbuf.
func (c *controlBuffer) put(it cbItem) error {
_, err := c.executeAndPut(nil, it)
return err
}
+// executeAndPut runs f, and if the return value is true, adds the given item to
+// the controlbuf. The item may be nil, in which case this method simply
+// executes f and does not add anything to the controlbuf.
+//
+// The first return value indicates whether the item was successfully added to
+// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
+// if the control buffer is already closed.
func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
- var wakeUp bool
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return false, ErrConnClosing
}
if f != nil {
if !f() { // f wasn't successful
- c.mu.Unlock()
return false, nil
}
}
+ if it == nil {
+ return true, nil
+ }
+
+ var wakeUp bool
if c.consumerWaiting {
wakeUp = true
c.consumerWaiting = false
@@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are adding the frame that puts us over the threshold; create
// a throttling channel.
- c.trfChan.Store(make(chan struct{}))
+ ch := make(chan struct{})
+ c.trfChan.Store(&ch)
}
}
- c.mu.Unlock()
if wakeUp {
select {
- case c.ch <- struct{}{}:
+ case c.wakeupCh <- struct{}{}:
default:
}
}
return true, nil
}
-// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
- }
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
- return false, nil
- }
- c.mu.Unlock()
- return true, nil
-}
-
+// get returns the next control frame from the control buffer. If block is true
+// **and** there are no control frames in the control buffer, the call blocks
+// until one of the conditions is met: there is a frame to return or the
+// transport is closed.
func (c *controlBuffer) get(block bool) (any, error) {
for {
c.mu.Lock()
- if c.err != nil {
+ frame, err := c.getOnceLocked()
+ if frame != nil || err != nil || !block {
+ // If we read a frame or an error, we can return to the caller. The
+ // call to getOnceLocked() returns a nil frame and a nil error if
+ // there is nothing to read, and in that case, if the caller asked
+ // us not to block, we can return now as well.
c.mu.Unlock()
- return nil, c.err
- }
- if !c.list.isEmpty() {
- h := c.list.dequeue().(cbItem)
- if h.isTransportResponseFrame() {
- if c.transportResponseFrames == maxQueuedTransportResponseFrames {
- // We are removing the frame that put us over the
- // threshold; close and clear the throttling channel.
- ch := c.trfChan.Load().(chan struct{})
- close(ch)
- c.trfChan.Store((chan struct{})(nil))
- }
- c.transportResponseFrames--
- }
- c.mu.Unlock()
- return h, nil
- }
- if !block {
- c.mu.Unlock()
- return nil, nil
+ return frame, err
}
c.consumerWaiting = true
c.mu.Unlock()
+
+ // Release the lock above and wait to be woken up.
select {
- case <-c.ch:
+ case <-c.wakeupCh:
case <-c.done:
return nil, errors.New("transport closed by client")
}
}
}
+// Callers must not use this method, but should instead use get().
+//
+// Caller must hold c.mu.
+func (c *controlBuffer) getOnceLocked() (any, error) {
+ if c.closed {
+ return false, ErrConnClosing
+ }
+ if c.list.isEmpty() {
+ return nil, nil
+ }
+ h := c.list.dequeue().(cbItem)
+ if h.isTransportResponseFrame() {
+ if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+ // We are removing the frame that put us over the
+ // threshold; close and clear the throttling channel.
+ ch := c.trfChan.Swap(nil)
+ close(*ch)
+ }
+ c.transportResponseFrames--
+ }
+ return h, nil
+}
+
+// finish closes the control buffer, cleaning up any streams that have queued
+// header frames. Once this method returns, no more frames can be added to the
+// control buffer, and attempts to do so will return ErrConnClosing.
func (c *controlBuffer) finish() {
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
+ defer c.mu.Unlock()
+
+ if c.closed {
return
}
- c.err = ErrConnClosing
+ c.closed = true
// There may be headers for streams in the control buffer.
// These streams need to be cleaned out since the transport
// is still not aware of these yet.
for head := c.list.dequeueAll(); head != nil; head = head.next {
- hdr, ok := head.it.(*headerFrame)
- if !ok {
- continue
- }
- if hdr.onOrphaned != nil { // It will be nil on the server-side.
- hdr.onOrphaned(ErrConnClosing)
+ switch v := head.it.(type) {
+ case *headerFrame:
+ if v.onOrphaned != nil { // It will be nil on the server-side.
+ v.onOrphaned(ErrConnClosing)
+ }
+ case *dataFrame:
+ _ = v.reader.Close()
}
}
+
// In case throttle() is currently in flight, it needs to be unblocked.
// Otherwise, the transport may not close, since the transport is closed by
// the reader encountering the connection error.
- ch, _ := c.trfChan.Load().(chan struct{})
+ ch := c.trfChan.Swap(nil)
if ch != nil {
- close(ch)
+ close(*ch)
}
- c.trfChan.Store((chan struct{})(nil))
- c.mu.Unlock()
}
type side int
@@ -466,7 +487,7 @@ const (
// stream maintains a queue of data frames; as loopy receives data frames
// it gets added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
-// thereby closely resemebling to a round-robin scheduling over all streams. While
+// thereby closely resembling a round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {
@@ -490,12 +511,13 @@ type loopyWriter struct {
draining bool
conn net.Conn
logger *grpclog.PrefixLogger
+ bufferPool mem.BufferPool
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter {
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
side: s,
@@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
conn: conn,
logger: logger,
ssGoAwayHandler: goAwayHandler,
+ bufferPool: bufferPool,
}
return l
}
@@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
// not be established yet.
delete(l.estdStreams, c.streamID)
str.deleteSelf()
+ for head := str.itl.dequeueAll(); head != nil; head = head.next {
+ if df, ok := head.it.(*dataFrame); ok {
+ _ = df.reader.Close()
+ }
+ }
}
if c.rst { // If RST_STREAM needs to be sent.
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
@@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item on this stream.
// A data item is represented by a dataFrame, since it later translates into
// multiple HTTP2 data frames.
- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
- // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
- // maximum possible HTTP2 frame size.
+ // Every dataFrame has two buffers; h, which holds the grpc-message header,
+ // and data, which holds the actual message. As an optimization to keep wire
+ // traffic low, bytes from data are copied into h to make the frame as big as
+ // the maximum possible HTTP2 frame size.
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+ if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
+ _ = dataItem.reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
- var (
- buf []byte
- )
+
// Figure out the maximum size we can send
maxSize := http2MaxFrameLen
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
@@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) {
}
// Compute how much of the header and data we can send within quota and max frame length
hSize := min(maxSize, len(dataItem.h))
- dSize := min(maxSize-hSize, len(dataItem.d))
- if hSize != 0 {
- if dSize == 0 {
- buf = dataItem.h
- } else {
- // We can add some data to grpc message header to distribute bytes more equally across frames.
- // Copy on the stack to avoid generating garbage
- var localBuf [http2MaxFrameLen]byte
- copy(localBuf[:hSize], dataItem.h)
- copy(localBuf[hSize:], dataItem.d[:dSize])
- buf = localBuf[:hSize+dSize]
- }
+ dSize := min(maxSize-hSize, dataItem.reader.Remaining())
+ remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
+ size := hSize + dSize
+
+ var buf *[]byte
+
+ if hSize != 0 && dSize == 0 {
+ buf = &dataItem.h
} else {
- buf = dataItem.d
- }
+ // Note: this is only necessary because the http2.Framer does not support
+ // partially writing a frame, so the sequence must be materialized into a buffer.
+ // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
+ pool := l.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ buf = pool.Get(size)
+ defer pool.Put(buf)
- size := hSize + dSize
+ copy((*buf)[:hSize], dataItem.h)
+ _, _ = dataItem.reader.Read((*buf)[hSize:])
+ }
// Now that outgoing flow controls are checked we can replenish str's write quota
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+ if dataItem.endStream && remainingBytes == 0 {
endStream = true
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+ if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
return false, err
}
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
dataItem.h = dataItem.h[hSize:]
- dataItem.d = dataItem.d[dSize:]
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+ if remainingBytes == 0 { // All the data from that message was written out.
+ _ = dataItem.reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {
@@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
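The buffer handling throughout this file follows one discipline, visible in processData above: Get a *[]byte from the pool, slice it, and Put it back on every path. In isolation (hSize, dSize, and hdr are hypothetical):

    pool := mem.DefaultBufferPool()
    buf := pool.Get(hSize + dSize) // *[]byte with at least the requested length
    defer pool.Put(buf)            // recycle on every return path
    copy((*buf)[:hSize], hdr)      // header first; message bytes follow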
diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 4a3ddce29..ce878693b 100644
--- a/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -24,7 +24,6 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -40,6 +39,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -50,7 +50,7 @@ import (
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
if r.Method != http.MethodPost {
w.Header().Set("Allow", http.MethodPost)
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
@@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
contentType: contentType,
contentSubtype: contentSubtype,
stats: stats,
+ bufferPool: bufferPool,
}
st.logger = prefixLoggerForServerHandlerTransport(st)
@@ -171,6 +172,8 @@ type serverHandlerTransport struct {
stats []stats.Handler
logger *grpclog.PrefixLogger
+
+ bufferPool mem.BufferPool
}
func (ht *serverHandlerTransport) Close(err error) {
@@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
s.hdrMu.Lock()
+ defer s.hdrMu.Unlock()
if p := st.Proto(); p != nil && len(p.Details) > 0 {
delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
@@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
}
}
- s.hdrMu.Unlock()
})
if err == nil { // transport has not been closed
@@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
s.hdrMu.Unlock()
}
-func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+ // Always take a reference because otherwise there is no guarantee the data will
+ // be available after this function returns. This is what callers of Write
+ // expect.
+ data.Ref()
headersWritten := s.updateHeaderSent()
- return ht.do(func() {
+ err := ht.do(func() {
+ defer data.Free()
if !headersWritten {
ht.writePendingHeaders(s)
}
ht.rw.Write(hdr)
- ht.rw.Write(data)
+ for _, b := range data {
+ _, _ = ht.rw.Write(b.ReadOnlyData())
+ }
ht.rw.(http.Flusher).Flush()
})
+ if err != nil {
+ data.Free()
+ return err
+ }
+ return nil
}
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
@@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
+ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
windowHandler: func(int) {},
}
@@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
go func() {
defer close(readerDone)
- // TODO: minimize garbage, optimize recvBuffer code/ownership
- const readSize = 8196
- for buf := make([]byte, readSize); ; {
- n, err := req.Body.Read(buf)
+ for {
+ buf := ht.bufferPool.Get(http2MaxFrameLen)
+ n, err := req.Body.Read(*buf)
if n > 0 {
- s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
- buf = buf[n:]
+ *buf = (*buf)[:n]
+ s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
+ } else {
+ ht.bufferPool.Put(buf)
}
if err != nil {
s.buf.put(recvMsg{err: mapRecvMsgError(err)})
return
}
- if len(buf) == 0 {
- buf = make([]byte, readSize)
- }
}
}()
@@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
func (ht *serverHandlerTransport) IncrMsgRecv() {}
-func (ht *serverHandlerTransport) Drain(debugData string) {
+func (ht *serverHandlerTransport) Drain(string) {
panic("Drain() is not implemented")
}
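The Ref/Free pairing in the new Write generalizes to any hand-off of a mem.BufferSlice to asynchronous work: take a reference before the hand-off and guarantee exactly one Free on every path. A reduced sketch (sink is a hypothetical io.Writer):

    data.Ref() // keep the underlying buffers alive past this function's return
    go func() {
    	defer data.Free()
    	for _, b := range data {
    		sink.Write(b.ReadOnlyData()) // zero-copy view of each buffer
    	}
    }()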
diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 3c63c7069..62b81885d 100644
--- a/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -47,6 +47,7 @@ import (
isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
@@ -59,6 +60,8 @@ import (
// atomically.
var clientConnectionCounter uint64
+var goAwayLoopyWriterTimeout = 5 * time.Second
+
var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
// http2Client implements the ClientTransport interface with HTTP2.
@@ -83,9 +86,9 @@ type http2Client struct {
writerDone chan struct{} // sync point to enable testing.
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
// that the server sent GoAway on this transport.
- goAway chan struct{}
-
- framer *framer
+ goAway chan struct{}
+ keepaliveDone chan struct{} // Closed when the keepalive goroutine exits.
+ framer *framer
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
// Do not access controlBuf with mu held.
@@ -144,7 +147,7 @@ type http2Client struct {
onClose func(GoAwayReason)
- bufferPool *bufferPool
+ bufferPool mem.BufferPool
connectionID uint64
logger *grpclog.PrefixLogger
@@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
}(conn)
- // The following defer and goroutine monitor the connectCtx for cancelation
+ // The following defer and goroutine monitor the connectCtx for cancellation
// and deadline. On context expiration, the connection is hard closed and
// this function will naturally fail as a result. Otherwise, the defer
// waits for the goroutine to exit to prevent the context from being
@@ -332,6 +335,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
goAway: make(chan struct{}),
+ keepaliveDone: make(chan struct{}),
framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme,
@@ -346,7 +350,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
keepaliveEnabled: keepaliveEnabled,
- bufferPool: newBufferPool(),
+ bufferPool: opts.BufferPool,
onClose: onClose,
}
var czSecurity credentials.ChannelzSecurityValue
@@ -463,7 +467,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
return nil, err
}
go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
+ t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
if err := t.loopy.run(); !isIOError(err) {
// Immediately close the connection, as the loopy writer returns
// when there are no more active streams and we were draining (the
@@ -504,7 +508,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
closeStream: func(err error) {
t.CloseStream(s, err)
},
- freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -525,8 +528,9 @@ func (t *http2Client) getPeer() *peer.Peer {
// to be the last frame loopy writes to the transport.
func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
t.mu.Lock()
- defer t.mu.Unlock()
- if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil {
+ maxStreamID := t.nextID - 2
+ t.mu.Unlock()
+ if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil {
return false, err
}
return false, g.closeConn
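The hunk above narrows the lock scope: t.nextID is snapshotted while holding t.mu and the framer write happens unlocked, so a slow or stuck WriteGoAway can no longer hold the transport mutex. A minimal sketch of this snapshot-then-I/O pattern (types and names hypothetical, not grpc internals):

```go
package lockscope

import (
	"fmt"
	"net"
	"sync"
)

// idSource is a hypothetical type illustrating the snapshot-then-I/O pattern:
// copy the fields the write needs while holding the mutex, then perform the
// blocking write without it.
type idSource struct {
	mu     sync.Mutex
	nextID uint32
}

func (s *idSource) writeGoAwayLike(conn net.Conn) error {
	s.mu.Lock()
	maxStreamID := s.nextID - 2 // snapshot under the lock
	s.mu.Unlock()
	// The write can block on a slow peer without wedging every other
	// code path that needs s.mu.
	_, err := fmt.Fprintf(conn, "GOAWAY maxStreamID=%d\n", maxStreamID)
	return err
}
```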
@@ -770,7 +774,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
hdr := &headerFrame{
hf: headerFields,
endStream: false,
- initStream: func(id uint32) error {
+ initStream: func(uint32) error {
t.mu.Lock()
// TODO: handle transport closure in loopy instead and remove this
// initStream is never called when transport is draining.
@@ -983,6 +987,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
// only once on a transport. Once it is called, the transport should not be
// accessed anymore.
func (t *http2Client) Close(err error) {
+ t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
t.mu.Lock()
// Make sure we only close once.
if t.state == closing {
@@ -1005,18 +1010,33 @@ func (t *http2Client) Close(err error) {
// should unblock it so that the goroutine eventually exits.
t.kpDormancyCond.Signal()
}
+ // Append info about previous GOAWAYs, if there were any, since this may be
+ // important for understanding the root cause of this connection being closed.
+ goAwayDebugMessage := t.goAwayDebugMessage
t.mu.Unlock()
+
// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
- // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY.
+ // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
+ // also waits, bounded by a timer, for the loopyWriter to exit, to avoid
+ // blocking for a long time if the connection is blackholed, i.e. TCP is
+ // just stuck.
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
- <-t.writerDone
+ timer := time.NewTimer(goAwayLoopyWriterTimeout)
+ defer timer.Stop()
+ select {
+ case <-t.writerDone: // success
+ case <-timer.C:
+ t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
+ }
t.cancel()
t.conn.Close()
+ // Wait for the reader and keepalive goroutines to exit before returning,
+ // ensuring all resources are cleaned up by the time Close returns.
+ <-t.readerDone
+ if t.keepaliveEnabled {
+ <-t.keepaliveDone
+ }
channelz.RemoveEntry(t.channelz.ID)
- // Append info about previous goaways if there were any, since this may be important
- // for understanding the root cause for this connection to be closed.
- _, goAwayDebugMessage := t.GetGoAwayReason()
-
var st *status.Status
if len(goAwayDebugMessage) > 0 {
st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
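Close now bounds its wait for the loopy writer with goAwayLoopyWriterTimeout so a blackholed connection cannot stall shutdown. A self-contained sketch of the bounded-wait idiom used here:

```go
package main

import (
	"fmt"
	"time"
)

// waitWithTimeout waits for done to close but gives up after d, so a peer
// that never drains its socket cannot block shutdown forever.
func waitWithTimeout(done <-chan struct{}, d time.Duration) bool {
	timer := time.NewTimer(d)
	defer timer.Stop() // release the timer if done wins the race
	select {
	case <-done:
		return true
	case <-timer.C:
		return false
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(done)
	}()
	fmt.Println(waitWithTimeout(done, 5*time.Second)) // true
}
```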
@@ -1065,27 +1085,36 @@ func (t *http2Client) GracefulClose() {
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
-func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error {
+ reader := data.Reader()
+
if opts.Last {
// If it's the last message, update stream state.
if !s.compareAndSwapState(streamActive, streamWriteDone) {
+ _ = reader.Close()
return errStreamDone
}
} else if s.getState() != streamActive {
+ _ = reader.Close()
return errStreamDone
}
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
h: hdr,
- d: data,
+ reader: reader,
}
- if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
return err
}
}
- return t.controlBuf.put(df)
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ return nil
}
func (t *http2Client) getStream(f http2.Frame) *Stream {
@@ -1190,10 +1219,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, the
+ // transport is always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
// The server has closed the stream without sending trailers. Record that
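handleData now copies frame payloads into reference-counted buffers via mem.Copy (defined later in this diff) instead of pooled bytes.Buffers. A sketch of the call, assuming the default pool:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	frameData := []byte("payload from an HTTP/2 DATA frame")
	// Copy the transient frame bytes into a reference-counted Buffer so the
	// framer can reuse its read buffer immediately.
	buf := mem.Copy(frameData, mem.DefaultBufferPool())
	fmt.Println(buf.Len()) // 33
	buf.Free() // drop our reference (a no-op for small buffers, which skip pooling)
}
```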
@@ -1222,7 +1254,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
if statusCode == codes.Canceled {
if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
// Our deadline was already exceeded, and that was likely the cause
- // of this cancelation. Alter the status code accordingly.
+ // of this cancellation. Alter the status code accordingly.
statusCode = codes.DeadlineExceeded
}
}
@@ -1291,11 +1323,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) {
t.controlBuf.put(pingAck)
}
-func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
- return
+ return nil
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
@@ -1307,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
id := f.LastStreamID
if id > 0 && id%2 == 0 {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
- return
+ return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)
}
// A client can receive multiple GoAways from the server (see
// https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
@@ -1325,8 +1356,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
// If there are multiple GoAways the first one should always have an ID greater than the following ones.
if id > t.prevGoAwayID {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
- return
+ return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)
}
default:
t.setGoAwayReason(f)
@@ -1350,8 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.prevGoAwayID = id
if len(t.activeStreams) == 0 {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
- return
+ return connectionErrorf(true, nil, "received goaway and there are no active streams")
}
streamsToClose := make([]*Stream, 0)
@@ -1368,6 +1397,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
for _, stream := range streamsToClose {
t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
}
+ return nil
}
// setGoAwayReason sets the value of t.goAwayReason based
@@ -1603,7 +1633,13 @@ func (t *http2Client) readServerPreface() error {
// network connection. If the server preface is not read successfully, an
// error is pushed to errCh; otherwise errCh is closed with no error.
func (t *http2Client) reader(errCh chan<- error) {
- defer close(t.readerDone)
+ var errClose error
+ defer func() {
+ close(t.readerDone)
+ if errClose != nil {
+ t.Close(errClose)
+ }
+ }()
if err := t.readServerPreface(); err != nil {
errCh <- err
@@ -1642,11 +1678,10 @@ func (t *http2Client) reader(errCh chan<- error) {
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
}
continue
- } else {
- // Transport error.
- t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
- return
}
+ // Transport error.
+ errClose = connectionErrorf(true, err, "error reading from server: %v", err)
+ return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
@@ -1660,7 +1695,7 @@ func (t *http2Client) reader(errCh chan<- error) {
case *http2.PingFrame:
t.handlePing(frame)
case *http2.GoAwayFrame:
- t.handleGoAway(frame)
+ errClose = t.handleGoAway(frame)
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
default:
@@ -1671,15 +1706,15 @@ func (t *http2Client) reader(errCh chan<- error) {
}
}
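With handleGoAway returning an error, the reader goroutine records the first fatal error and a single defer closes readerDone before invoking t.Close, which in turn waits on readerDone. A schematic reduction of that signal-then-close ordering (not the transport's actual code):

```go
package main

import (
	"errors"
	"fmt"
)

// runLoop mimics the new reader structure: helpers return errors instead of
// closing the transport themselves; the loop records the first fatal error,
// and one defer signals exit before performing the close.
func runLoop(events []string, done chan<- struct{}, closeFn func(error)) {
	var errClose error
	defer func() {
		close(done) // unblock waiters (Close blocks on this) first
		if errClose != nil {
			closeFn(errClose)
		}
	}()
	for _, ev := range events {
		if ev == "goaway" {
			errClose = errors.New("received goaway and there are no active streams")
			return
		}
	}
}

func main() {
	done := make(chan struct{})
	runLoop([]string{"data", "goaway"}, done, func(err error) { fmt.Println("close:", err) })
	<-done
}
```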
-func minTime(a, b time.Duration) time.Duration {
- if a < b {
- return a
- }
- return b
-}
-
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
+ var err error
+ defer func() {
+ close(t.keepaliveDone)
+ if err != nil {
+ t.Close(err)
+ }
+ }()
p := &ping{data: [8]byte{}}
// True iff a ping has been sent, and no data has been received since then.
outstandingPing := false
@@ -1703,7 +1738,7 @@ func (t *http2Client) keepalive() {
continue
}
if outstandingPing && timeoutLeft <= 0 {
- t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
+ err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")
return
}
t.mu.Lock()
@@ -1745,7 +1780,7 @@ func (t *http2Client) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, timeoutLeft)
+ sleepDuration := min(t.kp.Time, timeoutLeft)
timeoutLeft -= sleepDuration
timer.Reset(sleepDuration)
case <-t.ctx.Done():
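The minTime helper is dropped in favor of Go 1.21's built-in min, which works on any ordered type, including time.Duration; the server-side keepalive below gets the same treatment. For illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// min is a Go 1.21 builtin over ordered types, so the hand-rolled
	// minTime(a, b time.Duration) helper is no longer needed.
	kpTime := 30 * time.Second
	timeoutLeft := 7 * time.Second
	fmt.Println(min(kpTime, timeoutLeft)) // 7s
}
```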
diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index b7091165b..584b50fe5 100644
--- a/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -39,6 +39,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/grpc/codes"
@@ -119,7 +120,7 @@ type http2Server struct {
// Fields below are for channelz metric collection.
channelz *channelz.Socket
- bufferPool *bufferPool
+ bufferPool mem.BufferPool
connectionID uint64
@@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
- bufferPool: newBufferPool(),
+ bufferPool: config.BufferPool,
}
var czSecurity credentials.ChannelzSecurityValue
if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
@@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.handleSettings(sf)
go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
+ t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
err := t.loopy.run()
close(t.loopyWriterDone)
if !isIOError(err) {
@@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctxDone,
- recv: s.buf,
- freeBuffer: t.bufferPool.put,
+ ctx: s.ctx,
+ ctxDone: s.ctxDone,
+ recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, the
+ // transport is always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
if f.StreamEnded() {
@@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
onWrite: t.setResetPingStrikes,
}
- success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+ success, err := t.controlBuf.executeAndPut(func() bool {
+ return t.checkForHeaderListSize(trailingHeader)
+ }, nil)
if !success {
if err != nil {
return err
@@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
// is returns if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+ reader := data.Reader()
+
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.WriteHeader(s, nil); err != nil {
+ _ = reader.Close()
return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
+ _ = reader.Close()
return t.streamContextErr(s)
}
}
+
df := &dataFrame{
streamID: s.id,
h: hdr,
- d: data,
+ reader: reader,
onEachWrite: t.setResetPingStrikes,
}
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
return t.streamContextErr(s)
}
- return t.controlBuf.put(df)
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ return nil
}
// keepalive running in a separate goroutine does the following:
@@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+ sleepDuration := min(t.kp.Time, kpTimeoutLeft)
kpTimeoutLeft -= sleepDuration
kpTimer.Reset(sleepDuration)
case <-t.done:
diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go b/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 39cef3bd4..3613d7b64 100644
--- a/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
return w
}
-func (w *bufWriter) Write(b []byte) (n int, err error) {
+func (w *bufWriter) Write(b []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
if w.batchSize == 0 { // Buffer has been disabled.
- n, err = w.conn.Write(b)
+ n, err := w.conn.Write(b)
return n, toIOError(err)
}
if w.buf == nil {
b := w.pool.Get().(*[]byte)
w.buf = *b
}
+ written := 0
for len(b) > 0 {
- nn := copy(w.buf[w.offset:], b)
- b = b[nn:]
- w.offset += nn
- n += nn
- if w.offset >= w.batchSize {
- err = w.flushKeepBuffer()
+ copied := copy(w.buf[w.offset:], b)
+ b = b[copied:]
+ written += copied
+ w.offset += copied
+ if w.offset < w.batchSize {
+ continue
+ }
+ if err := w.flushKeepBuffer(); err != nil {
+ return written, err
}
}
- return n, err
+ return written, nil
}
func (w *bufWriter) Flush() error {
@@ -389,7 +393,7 @@ type framer struct {
fr *http2.Framer
}
-var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
+var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
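The bufWriter rewrite above makes the batching loop explicit: copy into the batch buffer, flush whenever it fills, and return the bytes written so far alongside any flush error. A toy reduction of that loop (hypothetical type, flushing into a bytes.Buffer):

```go
package main

import (
	"bytes"
	"fmt"
)

// batchWriter is a hypothetical miniature of bufWriter: copy into a fixed
// batch buffer and flush whenever it fills, propagating flush errors
// together with the byte count written so far.
type batchWriter struct {
	dst    *bytes.Buffer
	buf    []byte
	offset int
}

func (w *batchWriter) Write(b []byte) (int, error) {
	written := 0
	for len(b) > 0 {
		copied := copy(w.buf[w.offset:], b)
		b = b[copied:]
		written += copied
		w.offset += copied
		if w.offset < len(w.buf) {
			continue
		}
		// Batch is full: flush and keep going with the remainder.
		if _, err := w.dst.Write(w.buf[:w.offset]); err != nil {
			return written, err
		}
		w.offset = 0
	}
	return written, nil
}

func main() {
	w := &batchWriter{dst: &bytes.Buffer{}, buf: make([]byte, 4)}
	n, _ := w.Write([]byte("hello world"))
	fmt.Println(n, w.dst.String()) // 11 "hello wo" (3 bytes still buffered)
}
```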
diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go b/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go
index 24fa10325..54b224436 100644
--- a/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go
+++ b/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
}
return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
}
-
- return &bufConn{Conn: conn, r: r}, nil
+ // The buffer could contain extra bytes from the target server, so we can't
+ // discard it. However, in many cases where the server waits for the client
+ // to send the first message (e.g. when TLS is being used), the buffer will
+ // be empty, so we can avoid the overhead of reading through this buffer.
+ if r.Buffered() != 0 {
+ return &bufConn{Conn: conn, r: r}, nil
+ }
+ return conn, nil
}
// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
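The handshake now returns the raw conn when no bytes are left in the bufio.Reader, avoiding an extra read indirection on every subsequent read. A sketch of the conditional wrap (readerConn is a hypothetical stand-in for the internal bufConn):

```go
package proxyexample

import (
	"bufio"
	"io"
	"net"
)

// readerConn is a hypothetical stand-in for the internal bufConn: reads go
// through the bufio.Reader first, so bytes buffered while parsing the
// CONNECT response are not lost.
type readerConn struct {
	net.Conn
	r io.Reader
}

func (c *readerConn) Read(b []byte) (int, error) { return c.r.Read(b) }

// maybeWrap pays for the indirection only when the reader actually holds
// unread bytes from the proxy handshake.
func maybeWrap(conn net.Conn, r *bufio.Reader) net.Conn {
	if r.Buffered() != 0 {
		return &readerConn{Conn: conn, r: r}
	}
	return conn
}
```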
diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/transport.go b/tools/vendor/google.golang.org/grpc/internal/transport/transport.go
index 4b39c0ade..e12cb0bc9 100644
--- a/tools/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/tools/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -22,7 +22,6 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -37,6 +36,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
@@ -47,32 +47,10 @@ import (
const logLevel = 2
-type bufferPool struct {
- pool sync.Pool
-}
-
-func newBufferPool() *bufferPool {
- return &bufferPool{
- pool: sync.Pool{
- New: func() any {
- return new(bytes.Buffer)
- },
- },
- }
-}
-
-func (p *bufferPool) get() *bytes.Buffer {
- return p.pool.Get().(*bytes.Buffer)
-}
-
-func (p *bufferPool) put(b *bytes.Buffer) {
- p.pool.Put(b)
-}
-
// recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed.
type recvMsg struct {
- buffer *bytes.Buffer
+ buffer mem.Buffer
// nil: received some data
// io.EOF: stream is completed. data is nil.
// other non-nil error: transport failure. data is nil.
@@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer {
func (b *recvBuffer) put(r recvMsg) {
b.mu.Lock()
if b.err != nil {
+ // drop the buffer on the floor. Since b.err is not nil, any subsequent reads
+ // will always return an error, making this buffer inaccessible.
+ r.buffer.Free()
b.mu.Unlock()
// An error had occurred earlier, don't accept more
// data or errors.
@@ -148,45 +129,97 @@ type recvBufferReader struct {
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
- last *bytes.Buffer // Stores the remaining data in the previous calls.
+ last mem.Buffer // Stores the remaining data in the previous calls.
err error
- freeBuffer func(*bytes.Buffer)
}
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there no additional data available
-// in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
if r.last != nil {
- // Read remaining data left in last call.
- copied, _ := r.last.Read(p)
- if r.last.Len() == 0 {
- r.freeBuffer(r.last)
+ n, r.last = mem.ReadUnsafe(header, r.last)
+ return n, nil
+ }
+ if r.closeStream != nil {
+ n, r.err = r.readHeaderClient(header)
+ } else {
+ n, r.err = r.readHeader(header)
+ }
+ return n, r.err
+}
+
+// Read reads the next n bytes from last. If last is drained, it tries to read
+// additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.last != nil {
+ buf = r.last
+ if r.last.Len() > n {
+ buf, r.last = mem.SplitUnsafe(buf, n)
+ } else {
r.last = nil
}
- return copied, nil
+ return buf, nil
}
if r.closeStream != nil {
- n, r.err = r.readClient(p)
+ buf, r.err = r.readClient(n)
} else {
- n, r.err = r.read(p)
+ buf, r.err = r.read(n)
}
- return n, r.err
+ return buf, r.err
}
-func (r *recvBufferReader) read(p []byte) (n int, err error) {
+func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
select {
case <-r.ctxDone:
return 0, ContextErr(r.ctx.Err())
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readHeaderAdditional(m, header)
+ }
+}
+
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+ select {
+ case <-r.ctxDone:
+ return nil, ContextErr(r.ctx.Err())
+ case m := <-r.recv.get():
+ return r.readAdditional(m, n)
+ }
+}
+
+func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
+ // If the context is canceled, then closes the stream with nil metadata.
+ // closeStream writes its error parameter to r.recv as a recvMsg.
+ // r.readAdditional acts on that message and returns the necessary error.
+ select {
+ case <-r.ctxDone:
+ // Note that this adds the ctx error to the end of recv buffer, and
+ // reads from the head. This will delay the error until recv buffer is
+ // empty, thus will delay ctx cancellation in Recv().
+ //
+ // It's done this way to fix a race between ctx cancel and trailer. The
+ // race was, stream.Recv() may return ctx error if ctxDone wins the
+ // race, but stream.Trailer() may return a non-nil md because the stream
+ // was not marked as done when trailer is received. This closeStream
+ // call will mark stream as done, thus fix the race.
+ //
+ // TODO: delaying ctx error seems like an unnecessary side effect. What
+ // we really want is to mark the stream as done, and return ctx error
+ // faster.
+ r.closeStream(ContextErr(r.ctx.Err()))
+ m := <-r.recv.get()
+ return r.readHeaderAdditional(m, header)
+ case m := <-r.recv.get():
+ return r.readHeaderAdditional(m, header)
}
}
-func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
// If the context is canceled, then closes the stream with nil metadata.
// closeStream writes its error parameter to r.recv as a recvMsg.
// r.readAdditional acts on that message and returns the necessary error.
@@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
- return r.readAdditional(m, p)
+ return r.readAdditional(m, n)
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readAdditional(m, n)
}
}
-func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
r.recv.load()
if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
return 0, m.err
}
- copied, _ := m.buffer.Read(p)
- if m.buffer.Len() == 0 {
- r.freeBuffer(m.buffer)
- r.last = nil
- } else {
- r.last = m.buffer
+
+ n, r.last = mem.ReadUnsafe(header, m.buffer)
+
+ return n, nil
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
+ r.recv.load()
+ if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
+ return nil, m.err
+ }
+
+ if m.buffer.Len() > n {
+ m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
}
- return copied, nil
+
+ return m.buffer, nil
}
type streamState uint32
@@ -241,7 +289,7 @@ const (
type Stream struct {
id uint32
st ServerTransport // nil for client side Stream
- ct *http2Client // nil for server side Stream
+ ct ClientTransport // nil for server side Stream
ctx context.Context // the associated context of the stream
cancel context.CancelFunc // always nil for client side Stream
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
@@ -251,7 +299,7 @@ type Stream struct {
recvCompress string
sendCompress string
buf *recvBuffer
- trReader io.Reader
+ trReader *transportReader
fc *inFlow
wq *writeQuota
@@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool {
return s.noHeaders
}
-// Trailer returns the cached trailer metedata. Note that if it is not called
+// Trailer returns the cached trailer metadata. Note that if it is not called
// after the entire stream is done, it could return an empty MD. Client
// side only.
// It can be safely read only after stream has ended that is either read
@@ -499,36 +547,96 @@ func (s *Stream) write(m recvMsg) {
s.buf.put(m)
}
-// Read reads all p bytes from the wire for this stream.
-func (s *Stream) Read(p []byte) (n int, err error) {
+// ReadHeader reads data into the provided header slice from the stream. It
+// first checks if there was an error during a previous read operation and
+// returns it if present. It then requests a read operation for the length of
+// the header. It continues to read from the stream until the entire header
+// slice is filled or an error occurs. If an `io.EOF` error is encountered
+// with partially read data, it is converted to `io.ErrUnexpectedEOF` to
+// indicate an unexpected end of the stream. The method returns any error
+// encountered during the read process or nil if the header was successfully
+// read.
+func (s *Stream) ReadHeader(header []byte) (err error) {
+ // Don't request a read if there was an error earlier
+ if er := s.trReader.er; er != nil {
+ return er
+ }
+ s.requestRead(len(header))
+ for len(header) != 0 {
+ n, err := s.trReader.ReadHeader(header)
+ header = header[n:]
+ if len(header) == 0 {
+ err = nil
+ }
+ if err != nil {
+ if n > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// Read reads n bytes from the wire for this stream.
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) {
// Don't request a read if there was an error earlier
- if er := s.trReader.(*transportReader).er; er != nil {
- return 0, er
+ if er := s.trReader.er; er != nil {
+ return nil, er
}
- s.requestRead(len(p))
- return io.ReadFull(s.trReader, p)
+ s.requestRead(n)
+ for n != 0 {
+ buf, err := s.trReader.Read(n)
+ var bufLen int
+ if buf != nil {
+ bufLen = buf.Len()
+ }
+ n -= bufLen
+ if n == 0 {
+ err = nil
+ }
+ if err != nil {
+ if bufLen > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ data.Free()
+ return nil, err
+ }
+ data = append(data, buf)
+ }
+ return data, nil
}
-// tranportReader reads all the data available for this Stream from the transport and
+// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
- reader io.Reader
+ reader *recvBufferReader
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
windowHandler func(int)
er error
}
-func (t *transportReader) Read(p []byte) (n int, err error) {
- n, err = t.reader.Read(p)
+func (t *transportReader) ReadHeader(header []byte) (int, error) {
+ n, err := t.reader.ReadHeader(header)
if err != nil {
t.er = err
- return
+ return 0, err
}
t.windowHandler(n)
- return
+ return n, nil
+}
+
+func (t *transportReader) Read(n int) (mem.Buffer, error) {
+ buf, err := t.reader.Read(n)
+ if err != nil {
+ t.er = err
+ return buf, err
+ }
+ t.windowHandler(buf.Len())
+ return buf, nil
}
// BytesReceived indicates whether any bytes have been received on this stream.
@@ -574,6 +682,7 @@ type ServerConfig struct {
ChannelzParent *channelz.Server
MaxHeaderListSize *uint32
HeaderTableSize *uint32
+ BufferPool mem.BufferPool
}
// ConnectOptions covers all relevant options for communicating with the server.
@@ -612,6 +721,8 @@ type ConnectOptions struct {
MaxHeaderListSize *uint32
// UseProxy specifies if a proxy should be used.
UseProxy bool
+ // The mem.BufferPool to use when reading/writing to the wire.
+ BufferPool mem.BufferPool
}
// NewClientTransport establishes the transport with the required ConnectOptions
@@ -673,7 +784,7 @@ type ClientTransport interface {
// Write sends the data for the given stream. A nil stream indicates
// the write is to be performed on the transport as a whole.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+ Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
// NewStream creates a Stream for an RPC.
NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
@@ -725,7 +836,7 @@ type ServerTransport interface {
// Write sends the data for the given stream.
// Write may not be called on all streams.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+ Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
// WriteStatus sends the status of a stream to the client. WriteStatus is
// the final call made on a stream and always occurs.
@@ -798,7 +909,7 @@ var (
// connection is draining. This could be caused by goaway or balancer
// removing the address.
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
- // errStreamDone is returned from write at the client side to indiacte application
- // layer of an error.
+ // errStreamDone is returned from write at the client side to indicate an
+ // error to the application layer.
errStreamDone = errors.New("the stream is done")
// StatusGoAway indicates that the server sent a GOAWAY that included this
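The new Stream.Read assembles a mem.BufferSlice by repeatedly pulling chunks until n bytes arrive, converting an io.EOF that interrupts a partial message into io.ErrUnexpectedEOF. A self-contained sketch of that accumulation loop over plain byte slices (next is a hypothetical stand-in for the transport reader):

```go
package main

import (
	"fmt"
	"io"
)

// readFull pulls chunks from next until exactly n bytes are collected.
// next returns a chunk of at most n bytes, or an error.
func readFull(next func(n int) ([]byte, error), n int) ([][]byte, error) {
	var out [][]byte
	for n != 0 {
		chunk, err := next(n)
		n -= len(chunk)
		if n == 0 {
			err = nil // got everything we asked for; ignore a trailing EOF
		}
		if err != nil {
			if len(chunk) > 0 && err == io.EOF {
				// A partial message ended in EOF: the stream broke mid-message.
				err = io.ErrUnexpectedEOF
			}
			return nil, err
		}
		out = append(out, chunk)
	}
	return out, nil
}

func main() {
	src := []byte("abcdef")
	next := func(n int) ([]byte, error) {
		if len(src) == 0 {
			return nil, io.EOF
		}
		k := min(n, 4, len(src)) // deliver at most 4 bytes per call
		chunk := src[:k]
		src = src[k:]
		return chunk, nil
	}
	chunks, err := readFull(next, 6)
	fmt.Println(len(chunks), err) // 2 <nil>
}
```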
diff --git a/tools/vendor/google.golang.org/grpc/keepalive/keepalive.go b/tools/vendor/google.golang.org/grpc/keepalive/keepalive.go
index 34d31b5e7..eb42b19fb 100644
--- a/tools/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/tools/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -34,15 +34,29 @@ type ClientParameters struct {
// After a duration of this time if the client doesn't see any activity it
// pings the server to see if the transport is still alive.
// If set below 10s, a minimum value of 10s will be used instead.
- Time time.Duration // The current default value is infinity.
+ //
+ // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
+ // minutes (which means the client shouldn't ping more frequently than every
+ // 5 minutes).
+ //
+ // Though not ideal, it's not a strong requirement for Time to be less than
+ // EnforcementPolicy.MinTime. Time will automatically double if the server
+ // disconnects due to its enforcement policy.
+ //
+ // For more details, see
+ // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
+ Time time.Duration
// After having pinged for keepalive check, the client waits for a duration
// of Timeout and if no activity is seen even after that the connection is
// closed.
- Timeout time.Duration // The current default value is 20 seconds.
+ //
+ // If keepalive is enabled, and this value is not explicitly set, the default
+ // is 20 seconds.
+ Timeout time.Duration
// If true, client sends keepalive pings even with no active RPCs. If false,
// when there are no active RPCs, Time and Timeout will be ignored and no
// keepalive pings will be sent.
- PermitWithoutStream bool // false by default.
+ PermitWithoutStream bool
}
// ServerParameters is used to set keepalive and max-age parameters on the
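The expanded ClientParameters docs pin down the defaults and the interaction with server-side enforcement. A typical dial-time configuration (target and values illustrative):

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Values are illustrative; Time should respect the server's
	// EnforcementPolicy.MinTime (5 minutes by default) or the server may
	// respond with GOAWAY "too_many_pings".
	kp := keepalive.ClientParameters{
		Time:                5 * time.Minute,  // ping after 5m of inactivity
		Timeout:             20 * time.Second, // wait 20s for the ping ack
		PermitWithoutStream: false,            // only ping while RPCs are active
	}
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(kp),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```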
diff --git a/tools/vendor/google.golang.org/grpc/mem/buffer_pool.go b/tools/vendor/google.golang.org/grpc/mem/buffer_pool.go
new file mode 100644
index 000000000..c37c58c02
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -0,0 +1,194 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "sort"
+ "sync"
+
+ "google.golang.org/grpc/internal"
+)
+
+// BufferPool is a pool of buffers that can be shared and reused, resulting in
+// decreased memory allocation.
+type BufferPool interface {
+ // Get returns a buffer with the specified length from the pool.
+ Get(length int) *[]byte
+
+ // Put returns a buffer to the pool.
+ Put(*[]byte)
+}
+
+var defaultBufferPoolSizes = []int{
+ 256,
+ 4 << 10, // 4KB (go page size)
+ 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+ 32 << 10, // 32KB (default buffer size for io.Copy)
+ 1 << 20, // 1MB
+}
+
+var defaultBufferPool BufferPool
+
+func init() {
+ defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
+
+ internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
+ defaultBufferPool = pool
+ }
+
+ internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
+ bufferPoolingThreshold = threshold
+ }
+}
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with NewTieredBufferPool that uses a set of default sizes optimized
+// for expected workflows.
+func DefaultBufferPool() BufferPool {
+ return defaultBufferPool
+}
+
+// NewTieredBufferPool returns a BufferPool implementation that uses multiple
+// underlying pools of the given pool sizes.
+func NewTieredBufferPool(poolSizes ...int) BufferPool {
+ sort.Ints(poolSizes)
+ pools := make([]*sizedBufferPool, len(poolSizes))
+ for i, s := range poolSizes {
+ pools[i] = newSizedBufferPool(s)
+ }
+ return &tieredBufferPool{
+ sizedPools: pools,
+ }
+}
+
+// tieredBufferPool implements the BufferPool interface with multiple tiers of
+// buffer pools for different sizes of buffers.
+type tieredBufferPool struct {
+ sizedPools []*sizedBufferPool
+ fallbackPool simpleBufferPool
+}
+
+func (p *tieredBufferPool) Get(size int) *[]byte {
+ return p.getPool(size).Get(size)
+}
+
+func (p *tieredBufferPool) Put(buf *[]byte) {
+ p.getPool(cap(*buf)).Put(buf)
+}
+
+func (p *tieredBufferPool) getPool(size int) BufferPool {
+ poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
+ return p.sizedPools[i].defaultSize >= size
+ })
+
+ if poolIdx == len(p.sizedPools) {
+ return &p.fallbackPool
+ }
+
+ return p.sizedPools[poolIdx]
+}
+
+// sizedBufferPool is a BufferPool implementation that is optimized for specific
+// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
+// of 16KB and a sizedBufferPool can be configured to only return buffers with a
+// capacity of 16KB. Note, however, that it does not support returning larger
+// buffers and in fact panics if such a buffer is requested. Because of this,
+// this BufferPool implementation is not meant to be used on its own and rather
+// is intended to be embedded in a tieredBufferPool such that Get is only
+// invoked when the required size is smaller than or equal to defaultSize.
+type sizedBufferPool struct {
+ pool sync.Pool
+ defaultSize int
+}
+
+func (p *sizedBufferPool) Get(size int) *[]byte {
+ buf := p.pool.Get().(*[]byte)
+ b := *buf
+ clear(b[:cap(b)])
+ *buf = b[:size]
+ return buf
+}
+
+func (p *sizedBufferPool) Put(buf *[]byte) {
+ if cap(*buf) < p.defaultSize {
+ // Ignore buffers that are too small to fit in the pool. Otherwise, when
+ // Get is called it will panic as it tries to index outside the bounds
+ // of the buffer.
+ return
+ }
+ p.pool.Put(buf)
+}
+
+func newSizedBufferPool(size int) *sizedBufferPool {
+ return &sizedBufferPool{
+ pool: sync.Pool{
+ New: func() any {
+ buf := make([]byte, size)
+ return &buf
+ },
+ },
+ defaultSize: size,
+ }
+}
+
+var _ BufferPool = (*simpleBufferPool)(nil)
+
+// simpleBufferPool is an implementation of the BufferPool interface that
+// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
+// acquire a buffer from the pool but if that buffer is too small, it returns it
+// to the pool and creates a new one.
+type simpleBufferPool struct {
+ pool sync.Pool
+}
+
+func (p *simpleBufferPool) Get(size int) *[]byte {
+ bs, ok := p.pool.Get().(*[]byte)
+ if ok && cap(*bs) >= size {
+ *bs = (*bs)[:size]
+ return bs
+ }
+
+ // A buffer was pulled from the pool, but it is too small. Put it back in
+ // the pool and create one large enough.
+ if ok {
+ p.pool.Put(bs)
+ }
+
+ b := make([]byte, size)
+ return &b
+}
+
+func (p *simpleBufferPool) Put(buf *[]byte) {
+ p.pool.Put(buf)
+}
+
+var _ BufferPool = NopBufferPool{}
+
+// NopBufferPool is a buffer pool that returns new buffers without pooling.
+type NopBufferPool struct{}
+
+// Get returns a newly allocated buffer of the specified length.
+func (NopBufferPool) Get(length int) *[]byte {
+ b := make([]byte, length)
+ return &b
+}
+
+// Put is a no-op; NopBufferPool does not retain buffers.
+func (NopBufferPool) Put(*[]byte) {
+}
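A quick usage sketch for the new pool API: Get returns a *[]byte sized to the request (possibly with extra capacity from a larger tier) and Put hands it back for reuse; oversized requests fall through to the best-effort simpleBufferPool:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	buf := pool.Get(10_000) // served by the 16KB tier
	fmt.Println(len(*buf), cap(*buf) >= 10_000) // 10000 true
	copy(*buf, "hello")
	pool.Put(buf) // hand the buffer back; don't touch *buf afterwards

	// Larger than the biggest tier (1MB): falls back to simpleBufferPool.
	big := pool.Get(2 << 20)
	fmt.Println(len(*big)) // 2097152
	pool.Put(big)
}
```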
diff --git a/tools/vendor/google.golang.org/grpc/mem/buffer_slice.go b/tools/vendor/google.golang.org/grpc/mem/buffer_slice.go
new file mode 100644
index 000000000..228e9c2f2
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "io"
+)
+
+// BufferSlice offers a means to represent data that spans one or more Buffer
+// instances. A BufferSlice is meant to be immutable after creation, and methods
+// like Ref create and return copies of the slice. This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers such as Ref,
+// Len or CopyTo etc., will panic if any underlying buffers have already been
+// freed. It is recommended not to interact with any of the underlying buffers
+// directly; rather, such interactions should be mediated through the various
+// methods on this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+ var length int
+ for _, b := range s {
+ length += b.Len()
+ }
+ return length
+}
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+ for _, b := range s {
+ b.Ref()
+ }
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+ for _, b := range s {
+ b.Free()
+ }
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+ off := 0
+ for _, b := range s {
+ off += copy(dst[off:], b.ReadOnlyData())
+ }
+ return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+ l := s.Len()
+ if l == 0 {
+ return nil
+ }
+ out := make([]byte, l)
+ s.CopyTo(out)
+ return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice only actually has one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+ if len(s) == 1 {
+ s[0].Ref()
+ return s[0]
+ }
+ sLen := s.Len()
+ if sLen == 0 {
+ return emptyBuffer{}
+ }
+ buf := pool.Get(sLen)
+ s.CopyTo(*buf)
+ return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+ s.Ref()
+ return &sliceReader{
+ data: s,
+ len: s.Len(),
+ }
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other parts of the system. It also provides an additional convenience method
+// Remaining(), which returns the number of unread bytes remaining in the slice.
+// Buffers will be freed as they are read.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+ // Close frees the underlying BufferSlice and never returns an error. Subsequent
+ // calls to Read will return (0, io.EOF).
+ Close() error
+ // Remaining returns the number of unread bytes remaining in the slice.
+ Remaining() int
+}
+
+type sliceReader struct {
+ data BufferSlice
+ len int
+ // The index into data[0].ReadOnlyData().
+ bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+ return r.len
+}
+
+func (r *sliceReader) Close() error {
+ r.data.Free()
+ r.data = nil
+ r.len = 0
+ return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+ if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+ return false
+ }
+
+ r.data[0].Free()
+ r.data = r.data[1:]
+ r.bufferIdx = 0
+ return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ for len(buf) != 0 && r.len != 0 {
+ // Copy as much as possible from the first Buffer in the slice into the
+ // given byte slice.
+ data := r.data[0].ReadOnlyData()
+ copied := copy(buf, data[r.bufferIdx:])
+ r.len -= copied // Reduce len by the number of bytes copied.
+ r.bufferIdx += copied // Increment the buffer index.
+ n += copied // Increment the total number of bytes read.
+ buf = buf[copied:] // Shrink the given byte slice.
+
+ // If we have copied all the data from the first Buffer, free it and advance to
+ // the next in the slice.
+ r.freeFirstBufferIfEmpty()
+ }
+
+ return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ // There may be any number of empty buffers in the slice, clear them all until a
+ // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+ for r.freeFirstBufferIfEmpty() {
+ }
+
+ b := r.data[0].ReadOnlyData()[r.bufferIdx]
+ r.len--
+ r.bufferIdx++
+ // Free the first buffer in the slice if the last byte was read
+ r.freeFirstBufferIfEmpty()
+ return b, nil
+}
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+ buffers *BufferSlice
+ pool BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+ b := Copy(p, w.pool)
+ *w.buffers = append(*w.buffers, b)
+ return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool and the Buffer is added to
+// the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+ return &writer{buffers: buffers, pool: pool}
+}
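A usage sketch for BufferSlice: Len sums bytes across buffers (distinct from the builtin len), Reader takes its own references and frees buffers as it drains them, and Materialize flattens into one []byte:

```go
package main

import (
	"fmt"
	"io"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()
	s := mem.BufferSlice{
		mem.Copy([]byte("hello, "), pool),
		mem.Copy([]byte("world"), pool),
	}

	fmt.Println(len(s), s.Len()) // 2 12: buffer count vs. total bytes

	r := s.Reader() // takes its own references; s stays usable
	b, _ := io.ReadAll(r)
	fmt.Println(string(b)) // hello, world
	_ = r.Close()

	fmt.Println(string(s.Materialize())) // still valid: hello, world
	s.Free() // release the slice's own references
}
```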
diff --git a/tools/vendor/google.golang.org/grpc/mem/buffers.go b/tools/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 000000000..ecbf0b9a7
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,268 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package mem provides utilities that facilitate memory reuse in byte slices
+// that are used as buffers.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package mem
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+)
+
+// A Buffer represents a reference counted piece of data (in bytes) that can be
+// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
+// released by calling Free(), which invokes the free function given at creation
+// only after all references are released.
+//
+// Note that a Buffer is not safe for concurrent access and instead each
+// goroutine should use its own reference to the data, which can be acquired via
+// a call to Ref().
+//
+// Attempts to access the underlying data after releasing the reference to the
+// Buffer will panic.
+type Buffer interface {
+ // ReadOnlyData returns the underlying byte slice. Note that it is undefined
+ // behavior to modify the contents of this slice in any way.
+ ReadOnlyData() []byte
+ // Ref increases the reference counter for this Buffer.
+ Ref()
+ // Free decrements this Buffer's reference counter and frees the underlying
+ // byte slice if the counter reaches 0 as a result of this call.
+ Free()
+ // Len returns the Buffer's size.
+ Len() int
+
+ split(n int) (left, right Buffer)
+ read(buf []byte) (int, Buffer)
+}
+
+var (
+ bufferPoolingThreshold = 1 << 10
+
+ bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
+ refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }}
+)
+
+// IsBelowBufferPoolingThreshold returns true if the given size is less than or
+// equal to the threshold for buffer pooling. This is used to determine whether
+// to pool buffers or allocate them directly.
+func IsBelowBufferPoolingThreshold(size int) bool {
+ return size <= bufferPoolingThreshold
+}
+
+type buffer struct {
+ origData *[]byte
+ data []byte
+ refs *atomic.Int32
+ pool BufferPool
+}
+
+func newBuffer() *buffer {
+ return bufferObjectPool.Get().(*buffer)
+}
+
+// NewBuffer creates a new Buffer from the given data, initializing the reference
+// counter to 1. The data will then be returned to the given pool when all
+// references to the returned Buffer are released. As a special case to avoid
+// additional allocations, if the given buffer pool is nil, the returned buffer
+// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
+// underlying data is never freed.
+//
+// Note that the backing array of the given data is not copied.
+func NewBuffer(data *[]byte, pool BufferPool) Buffer {
+ // Use the buffer's capacity instead of the length, otherwise buffers may
+ // not be reused under certain conditions. For example, if a large buffer
+ // is acquired from the pool, but fewer bytes than the buffering threshold
+ // are written to it, the buffer will not be returned to the pool.
+ if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) {
+ return (SliceBuffer)(*data)
+ }
+ b := newBuffer()
+ b.origData = data
+ b.data = *data
+ b.pool = pool
+ b.refs = refObjectPool.Get().(*atomic.Int32)
+ b.refs.Add(1)
+ return b
+}
+
+// Copy creates a new Buffer from the given data, initializing the reference
+// counter to 1.
+//
+// It acquires a []byte from the given pool and copies over the backing array
+// of the given data. The []byte acquired from the pool is returned to the
+// pool when all references to the returned Buffer are released.
+func Copy(data []byte, pool BufferPool) Buffer {
+ if IsBelowBufferPoolingThreshold(len(data)) {
+ buf := make(SliceBuffer, len(data))
+ copy(buf, data)
+ return buf
+ }
+
+ buf := pool.Get(len(data))
+ copy(*buf, data)
+ return NewBuffer(buf, pool)
+}
+
+func (b *buffer) ReadOnlyData() []byte {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+ return b.data
+}
+
+func (b *buffer) Ref() {
+ if b.refs == nil {
+ panic("Cannot ref freed buffer")
+ }
+ b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+ if b.refs == nil {
+ panic("Cannot free freed buffer")
+ }
+
+ refs := b.refs.Add(-1)
+ switch {
+ case refs > 0:
+ return
+ case refs == 0:
+ if b.pool != nil {
+ b.pool.Put(b.origData)
+ }
+
+ refObjectPool.Put(b.refs)
+ b.origData = nil
+ b.data = nil
+ b.refs = nil
+ b.pool = nil
+ bufferObjectPool.Put(b)
+ default:
+ panic("Cannot free freed buffer")
+ }
+}
+
+func (b *buffer) Len() int {
+ return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+ if b.refs == nil {
+ panic("Cannot split freed buffer")
+ }
+
+ b.refs.Add(1)
+ split := newBuffer()
+ split.origData = b.origData
+ split.data = b.data[n:]
+ split.refs = b.refs
+ split.pool = b.pool
+
+ b.data = b.data[:n]
+
+ return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+
+ n := copy(buf, b.data)
+ if n == len(b.data) {
+ b.Free()
+ return n, nil
+ }
+
+ b.data = b.data[n:]
+ return n, b
+}
+
+func (b *buffer) String() string {
+ return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
+// ReadUnsafe reads bytes from the given Buffer into the provided slice.
+// It does not perform safety checks.
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+ return buf.read(dst)
+}
+
+// SplitUnsafe modifies the given Buffer to point to the first n bytes while it
+// returns a new reference to the remaining bytes. The returned Buffer
+// functions just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+ return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+ return nil
+}
+
+func (e emptyBuffer) Ref() {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+ return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+ return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+ return 0, e
+}
+
+// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
+// methods for reading, splitting, and managing the byte slice.
+type SliceBuffer []byte
+
+// ReadOnlyData returns the byte slice.
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+
+// Ref is a noop implementation of Ref.
+func (s SliceBuffer) Ref() {}
+
+// Free is a noop implementation of Free.
+func (s SliceBuffer) Free() {}
+
+// Len returns the length of the underlying byte slice.
+func (s SliceBuffer) Len() int { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+ return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+ n := copy(buf, s)
+ if n == len(s) {
+ return n, nil
+ }
+ return n, s[n:]
+}
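Reference counting in brief: every Ref must be balanced by a Free, and the backing array returns to the pool only when the last reference drops. A sketch (2KB keeps the buffer above the pooling threshold, so a real refcounted buffer is used rather than a SliceBuffer):

```go
package main

import (
	"bytes"
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()
	data := bytes.Repeat([]byte("x"), 2048)
	buf := mem.Copy(data, pool) // refcount = 1

	done := make(chan struct{})
	buf.Ref() // give the goroutine its own reference, refcount = 2
	go func(b mem.Buffer) {
		defer close(done)
		fmt.Println(b.Len()) // 2048; safe even if the other Free ran first
		b.Free()
	}(buf)

	buf.Free() // backing array returns to the pool once refcount hits 0
	<-done
}
```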
diff --git a/tools/vendor/google.golang.org/grpc/metadata/metadata.go b/tools/vendor/google.golang.org/grpc/metadata/metadata.go
index 1e9485fd6..d2e15253b 100644
--- a/tools/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/tools/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
// key from the incoming metadata if it exists. Keys are matched in a case insensitive
// manner.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(mdIncomingKey{}).(MD)
if !ok {
@@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
return copyOf(v)
}
for k, v := range md {
- // Case insenitive comparison: MD is a map, and there's no guarantee
+ // Case insensitive comparison: MD is a map, and there's no guarantee
// that the MD attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
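ValueFromIncomingContext is promoted to stable API here. Typical server-side usage:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Simulate an incoming context the way the gRPC server builds one.
	md := metadata.Pairs("authorization", "Bearer token123")
	ctx := metadata.NewIncomingContext(context.Background(), md)

	// Lookup is case insensitive, matching HTTP/2 header semantics.
	fmt.Println(metadata.ValueFromIncomingContext(ctx, "Authorization"))
	// Output: [Bearer token123]
}
```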
diff --git a/tools/vendor/google.golang.org/grpc/preloader.go b/tools/vendor/google.golang.org/grpc/preloader.go
index 73bd63364..e87a17f36 100644
--- a/tools/vendor/google.golang.org/grpc/preloader.go
+++ b/tools/vendor/google.golang.org/grpc/preloader.go
@@ -20,6 +20,7 @@ package grpc
import (
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@@ -31,9 +32,10 @@ import (
// later release.
type PreparedMsg struct {
// Struct for preparing msg before sending them
- encodedData []byte
+ encodedData mem.BufferSlice
hdr []byte
- payload []byte
+ payload mem.BufferSlice
+ pf payloadFormat
}
// Encode marshalls and compresses the message using the codec and compressor for the stream.
@@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
if err != nil {
return err
}
- p.encodedData = data
- compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+
+ materializedData := data.Materialize()
+ data.Free()
+ p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)}
+
+ // TODO: it should be possible to grab the bufferPool from the underlying
+ // stream implementation with a type cast to its actual type (such as
+ // addrConnStream) and accessing the buffer pool directly.
+ var compData mem.BufferSlice
+ compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
if err != nil {
return err
}
- p.hdr, p.payload = msgHeader(data, compData)
+
+ if p.pf.isCompressed() {
+ materializedCompData := compData.Materialize()
+ compData.Free()
+ compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)}
+ }
+
+ p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
+
return nil
}
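PreparedMsg keeps its purpose, encode and compress once, send many times, even though its fields are now mem.BufferSlices. A sketch of the intended usage (sendRepeated is hypothetical):

```go
package preloadexample

import "google.golang.org/grpc"

// sendRepeated encodes req once and sends it n times. Encode marshals (and
// compresses, if the stream has a compressor configured) up front; SendMsg
// recognizes the *PreparedMsg and reuses the encoded bytes on every send.
func sendRepeated(stream grpc.ClientStream, req any, n int) error {
	prepared := &grpc.PreparedMsg{}
	if err := prepared.Encode(stream, req); err != nil {
		return err
	}
	for i := 0; i < n; i++ {
		if err := stream.SendMsg(prepared); err != nil {
			return err
		}
	}
	return nil
}
```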
diff --git a/tools/vendor/google.golang.org/grpc/regenerate.sh b/tools/vendor/google.golang.org/grpc/regenerate.sh
deleted file mode 100644
index 3edca296c..000000000
--- a/tools/vendor/google.golang.org/grpc/regenerate.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/bash
-# Copyright 2020 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eu -o pipefail
-
-WORKDIR=$(mktemp -d)
-
-function finish {
- rm -rf "$WORKDIR"
-}
-trap finish EXIT
-
-export GOBIN=${WORKDIR}/bin
-export PATH=${GOBIN}:${PATH}
-mkdir -p ${GOBIN}
-
-echo "remove existing generated files"
-# grpc_testing_not_regenerate/*.pb.go is not re-generated,
-# see grpc_testing_not_regenerate/README.md for details.
-rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')
-
-echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
-(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
-
-echo "go install cmd/protoc-gen-go-grpc"
-(cd cmd/protoc-gen-go-grpc && go install .)
-
-echo "git clone https://github.com/grpc/grpc-proto"
-git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto
-
-echo "git clone https://github.com/protocolbuffers/protobuf"
-git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf
-
-# Pull in code.proto as a proto dependency
-mkdir -p ${WORKDIR}/googleapis/google/rpc
-echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
-curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
-
-mkdir -p ${WORKDIR}/out
-
-# Generates sources without the embed requirement
-LEGACY_SOURCES=(
- ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto
- ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto
- ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
- ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
- profiling/proto/service.proto
- ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
- ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
-)
-
-# Generates only the new gRPC Service symbols
-SOURCES=(
- $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$')
- ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
- ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
- ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
- ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
- ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
- ${WORKDIR}/grpc-proto/grpc/testing/*.proto
- ${WORKDIR}/grpc-proto/grpc/core/*.proto
-)
-
-# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
-# import path of 'bar' in the generated code when 'foo.proto' is imported in
-# one of the sources.
-#
-# Note that the protos listed here are all for testing purposes. All protos to
-# be used externally should have a go_package option (and they don't need to be
-# listed here).
-OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
-Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
-
-for src in ${SOURCES[@]}; do
- echo "protoc ${src}"
- protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \
- -I"." \
- -I${WORKDIR}/grpc-proto \
- -I${WORKDIR}/googleapis \
- -I${WORKDIR}/protobuf/src \
- ${src}
-done
-
-for src in ${LEGACY_SOURCES[@]}; do
- echo "protoc ${src}"
- protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
- -I"." \
- -I${WORKDIR}/grpc-proto \
- -I${WORKDIR}/googleapis \
- -I${WORKDIR}/protobuf/src \
- ${src}
-done
-
-# The go_package option in grpc/lookup/v1/rls.proto doesn't match the
-# current location. Move it into the right place.
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
-mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
-
-# grpc_testing_not_regenerate/*.pb.go are not re-generated,
-# see grpc_testing_not_regenerate/README.md for details.
-rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go
-
-cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
diff --git a/tools/vendor/google.golang.org/grpc/resolver_wrapper.go b/tools/vendor/google.golang.org/grpc/resolver_wrapper.go
index c5fb45236..23bb3fb25 100644
--- a/tools/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ b/tools/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
// any newly created ccResolverWrapper, except that close may be called instead.
func (ccr *ccResolverWrapper) start() error {
errCh := make(chan error)
- ccr.serializer.Schedule(func(ctx context.Context) {
+ ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil {
return
}
@@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error {
}
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
- ccr.serializer.Schedule(func(ctx context.Context) {
+ ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccr.resolver == nil {
return
}
@@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() {
ccr.closed = true
ccr.mu.Unlock()
- ccr.serializer.Schedule(func(context.Context) {
+ ccr.serializer.TrySchedule(func(context.Context) {
if ccr.resolver == nil {
return
}
@@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P
// addChannelzTraceEvent adds a channelz trace event containing the new
// state received from resolver implementations.
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+ if !logger.V(0) && !channelz.IsOn() {
+ return
+ }
var updates []string
var oldSC, newSC *ServiceConfig
var oldOK, newOK bool
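
The Schedule-to-TrySchedule rename makes explicit that a callback may be dropped once the serializer is closed, while the new early return in addChannelzTraceEvent skips building the updates slice when neither verbose logging nor channelz will consume it. A generic sketch of the same guard-before-format pattern; the sink and verbosity flag below are stand-ins, not the gRPC internals:

    package main

    import (
        "fmt"
        "strings"
    )

    // sink stands in for whatever consumes the formatted event; in the patch
    // above it is the channelz database and the verbose logger.
    type sink struct{ enabled bool }

    func (s sink) log(msg string) {
        if s.enabled {
            fmt.Println(msg)
        }
    }

    // traceStateChange builds a potentially expensive description of an
    // update, but only after confirming that someone will actually read it.
    func traceStateChange(s sink, verbose bool, fields map[string]string) {
        if !verbose && !s.enabled {
            return // skip every allocation below
        }
        updates := make([]string, 0, len(fields))
        for k, v := range fields {
            updates = append(updates, k+": "+v)
        }
        s.log("state updated: " + strings.Join(updates, "; "))
    }

    func main() {
        traceStateChange(sink{enabled: true}, false, map[string]string{
            "resolver":  "dns",
            "addresses": "2",
        })
    }
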
diff --git a/tools/vendor/google.golang.org/grpc/rpc_util.go b/tools/vendor/google.golang.org/grpc/rpc_util.go
index fdd49e6e9..aba1ae3e6 100644
--- a/tools/vendor/google.golang.org/grpc/rpc_util.go
+++ b/tools/vendor/google.golang.org/grpc/rpc_util.go
@@ -19,7 +19,6 @@
package grpc
import (
- "bytes"
"compress/gzip"
"context"
"encoding/binary"
@@ -35,6 +34,7 @@ import (
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -220,8 +220,8 @@ type HeaderCallOption struct {
HeaderAddr *metadata.MD
}
-func (o HeaderCallOption) before(c *callInfo) error { return nil }
-func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
+func (o HeaderCallOption) before(*callInfo) error { return nil }
+func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
*o.HeaderAddr, _ = attempt.s.Header()
}
@@ -242,8 +242,8 @@ type TrailerCallOption struct {
TrailerAddr *metadata.MD
}
-func (o TrailerCallOption) before(c *callInfo) error { return nil }
-func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
+func (o TrailerCallOption) before(*callInfo) error { return nil }
+func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
*o.TrailerAddr = attempt.s.Trailer()
}
@@ -264,24 +264,20 @@ type PeerCallOption struct {
PeerAddr *peer.Peer
}
-func (o PeerCallOption) before(c *callInfo) error { return nil }
-func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
+func (o PeerCallOption) before(*callInfo) error { return nil }
+func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
if x, ok := peer.FromContext(attempt.s.Context()); ok {
*o.PeerAddr = *x
}
}
-// WaitForReady configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If waitForReady is false and the
-// connection is in the TRANSIENT_FAILURE state, the RPC will fail
-// immediately. Otherwise, the RPC client will block the call until a
-// connection is available (or the call is canceled or times out) and will
-// retry the call if it fails due to a transient error. gRPC will not retry if
-// data was written to the wire unless the server indicates it did not process
-// the data. Please refer to
-// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+// WaitForReady configures the RPC's behavior when the client is in
+// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If
+// waitForReady is false, the RPC will fail immediately. Otherwise, the client
+// will wait until a connection becomes available or the RPC's deadline is
+// reached.
//
-// By default, RPCs don't "wait for ready".
+// By default, RPCs do not "wait for ready".
func WaitForReady(waitForReady bool) CallOption {
return FailFastCallOption{FailFast: !waitForReady}
}
@@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error {
c.failFast = o.FailFast
return nil
}
-func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
// OnFinish returns a CallOption that configures a callback to be called when
// the call completes. The error passed to the callback is the status of the
@@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error {
return nil
}
-func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can receive. If this is not set, gRPC uses the default
@@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
return nil
}
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can send. If this is not set, gRPC uses the default
@@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
c.maxSendMessageSize = &o.MaxSendMsgSize
return nil
}
-func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
@@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error {
c.creds = o.Creds
return nil
}
-func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
@@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error {
c.compressorType = o.CompressorType
return nil
}
-func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
@@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error {
c.contentSubtype = o.ContentSubtype
return nil
}
-func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {}
// ForceCodec returns a CallOption that will set codec to be used for all
// request and response messages for a call. The result of calling Name() will
@@ -515,10 +511,50 @@ type ForceCodecCallOption struct {
}
func (o ForceCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
+ c.codec = newCodecV1Bridge(o.Codec)
return nil
}
-func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {}
+
+// ForceCodecV2 returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceCodecV2(codec encoding.CodecV2) CallOption {
+ return ForceCodecV2CallOption{CodecV2: codec}
+}
+
+// ForceCodecV2CallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ForceCodecV2CallOption struct {
+ CodecV2 encoding.CodecV2
+}
+
+func (o ForceCodecV2CallOption) before(c *callInfo) error {
+ c.codec = o.CodecV2
+ return nil
+}
+
+func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {}
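
ForceCodecV2 takes an encoding.CodecV2, whose Marshal hands back a mem.BufferSlice rather than a flat []byte. A toy CodecV2 for plain strings, assuming only the three-method shape referenced above (a real codec would handle proto messages):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/encoding"
        "google.golang.org/grpc/mem"
    )

    // stringCodec is a toy CodecV2 that handles *string values only; it
    // exists to show the mem.BufferSlice contract, not as a production codec.
    type stringCodec struct{}

    var _ encoding.CodecV2 = stringCodec{} // compile-time interface check

    func (stringCodec) Marshal(v any) (mem.BufferSlice, error) {
        s, ok := v.(*string)
        if !ok {
            return nil, fmt.Errorf("stringCodec: unsupported type %T", v)
        }
        b := []byte(*s)
        return mem.BufferSlice{mem.NewBuffer(&b, nil)}, nil
    }

    func (stringCodec) Unmarshal(data mem.BufferSlice, v any) error {
        s, ok := v.(*string)
        if !ok {
            return fmt.Errorf("stringCodec: unsupported type %T", v)
        }
        // Materialize copies the segments out, so the slice can be freed by
        // the caller afterwards without invalidating *s.
        *s = string(data.Materialize())
        return nil
    }

    func (stringCodec) Name() string { return "toystring" }

    func main() {
        in := "ping"
        buf, err := stringCodec{}.Marshal(&in)
        if err != nil {
            panic(err)
        }
        defer buf.Free()

        var out string
        _ = stringCodec{}.Unmarshal(buf, &out)
        fmt.Println(out) // ping
    }

A codec like this can be supplied per call with ForceCodecV2, or server-wide with the ForceServerCodecV2 option added later in this patch.
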
// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
// an encoding.Codec.
@@ -540,10 +576,10 @@ type CustomCodecCallOption struct {
}
func (o CustomCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
+ c.codec = newCodecV0Bridge(o.Codec)
return nil
}
-func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
@@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
return nil
}
-func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {}
// The format of the payload: compressed or not?
type payloadFormat uint8
@@ -581,19 +617,28 @@ const (
compressionMade payloadFormat = 1 // compressed
)
+func (pf payloadFormat) isCompressed() bool {
+ return pf == compressionMade
+}
+
+type streamReader interface {
+ ReadHeader(header []byte) error
+ Read(n int) (mem.BufferSlice, error)
+}
+
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
- r io.Reader
+ r streamReader
// The header of a gRPC message. Find more detail at
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
header [5]byte
- // recvBufferPool is the pool of shared receive buffers.
- recvBufferPool SharedBufferPool
+ // bufferPool is the pool of shared receive buffers.
+ bufferPool mem.BufferPool
}
// recvMsg reads a complete gRPC message from the stream.
@@ -608,14 +653,15 @@ type parser struct {
// - an error from the status package
//
// No other error values or types must be returned, which also means
-// that the underlying io.Reader must not return an incompatible
+// that the underlying streamReader must not return an incompatible
// error.
-func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
- if _, err := p.r.Read(p.header[:]); err != nil {
+func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
+ err := p.r.ReadHeader(p.header[:])
+ if err != nil {
return 0, nil, err
}
- pf = payloadFormat(p.header[0])
+ pf := payloadFormat(p.header[0])
length := binary.BigEndian.Uint32(p.header[1:])
if length == 0 {
@@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
if int(length) > maxReceiveMessageSize {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
- msg = p.recvBufferPool.Get(int(length))
- if _, err := p.r.Read(msg); err != nil {
+
+ data, err := p.r.Read(int(length))
+ if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return 0, nil, err
}
- return pf, msg, nil
+ return pf, data, nil
}
// encode serializes msg and returns a buffer containing the message, or an
// error if it is too large to be transmitted by grpc. If msg is nil, it
// generates an empty message.
-func encode(c baseCodec, msg any) ([]byte, error) {
+func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
if msg == nil { // NOTE: typed nils will not be caught by this check
return nil, nil
}
@@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) {
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
- if uint(len(b)) > math.MaxUint32 {
+ if uint(b.Len()) > math.MaxUint32 {
+ b.Free()
return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", b.Len())
}
return b, nil
@@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) {
// indicating no compression was done.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
- if compressor == nil && cp == nil {
- return nil, nil
- }
- if len(in) == 0 {
- return nil, nil
+func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) {
+ if (compressor == nil && cp == nil) || in.Len() == 0 {
+ return nil, compressionNone, nil
}
+ var out mem.BufferSlice
+ w := mem.NewWriter(&out, pool)
wrapErr := func(err error) error {
+ out.Free()
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
- cbuf := &bytes.Buffer{}
if compressor != nil {
- z, err := compressor.Compress(cbuf)
+ z, err := compressor.Compress(w)
if err != nil {
- return nil, wrapErr(err)
+ return nil, 0, wrapErr(err)
}
- if _, err := z.Write(in); err != nil {
- return nil, wrapErr(err)
+ for _, b := range in {
+ if _, err := z.Write(b.ReadOnlyData()); err != nil {
+ return nil, 0, wrapErr(err)
+ }
}
if err := z.Close(); err != nil {
- return nil, wrapErr(err)
+ return nil, 0, wrapErr(err)
}
} else {
- if err := cp.Do(cbuf, in); err != nil {
- return nil, wrapErr(err)
+ // This is obviously really inefficient since it fully materializes the data, but
+ // there is no way around this with the old Compressor API. At least it attempts
+ // to return the buffer to the provider, in the hopes it can be reused (maybe
+ // even by a subsequent call to this very function).
+ buf := in.MaterializeToBuffer(pool)
+ defer buf.Free()
+ if err := cp.Do(w, buf.ReadOnlyData()); err != nil {
+ return nil, 0, wrapErr(err)
}
}
- return cbuf.Bytes(), nil
+ return out, compressionMade, nil
}
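
The reworked compress streams straight into pooled buffers via mem.NewWriter instead of accumulating into a bytes.Buffer, walking the input one segment at a time. A standalone sketch of that flow, with the stdlib gzip writer standing in for an encoding.Compressor:

    package main

    import (
        "compress/gzip"
        "fmt"

        "google.golang.org/grpc/mem"
    )

    // gzipSlice compresses every segment of in into a fresh BufferSlice whose
    // backing buffers come from pool, mirroring the shape of the new compress().
    func gzipSlice(in mem.BufferSlice, pool mem.BufferPool) (mem.BufferSlice, error) {
        var out mem.BufferSlice
        w := mem.NewWriter(&out, pool)
        z := gzip.NewWriter(w)
        for _, b := range in {
            if _, err := z.Write(b.ReadOnlyData()); err != nil {
                out.Free()
                return nil, err
            }
        }
        if err := z.Close(); err != nil {
            out.Free()
            return nil, err
        }
        return out, nil
    }

    func main() {
        payload := []byte("some compressible payload some compressible payload")
        in := mem.BufferSlice{mem.NewBuffer(&payload, nil)}
        defer in.Free()

        out, err := gzipSlice(in, mem.DefaultBufferPool())
        if err != nil {
            panic(err)
        }
        defer out.Free()
        fmt.Printf("compressed %d -> %d bytes\n", in.Len(), out.Len())
    }
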
const (
@@ -697,33 +752,36 @@ const (
// msgHeader returns a 5-byte header for the message being transmitted and the
// payload, which is compData if pf indicates compression, or data otherwise.
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
hdr = make([]byte, headerLen)
- if compData != nil {
- hdr[0] = byte(compressionMade)
- data = compData
+ hdr[0] = byte(pf)
+
+ var length uint32
+ if pf.isCompressed() {
+ length = uint32(compData.Len())
+ payload = compData
} else {
- hdr[0] = byte(compressionNone)
+ length = uint32(data.Len())
+ payload = data
}
// Write length of payload into buf
- binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
- return hdr, data
+ binary.BigEndian.PutUint32(hdr[payloadLen:], length)
+ return hdr, payload
}
-func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload {
+func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
return &stats.OutPayload{
Client: client,
Payload: msg,
- Data: data,
- Length: len(data),
- WireLength: len(payload) + headerLen,
- CompressedLength: len(payload),
+ Length: dataLength,
+ WireLength: payloadLength + headerLen,
+ CompressedLength: payloadLength,
SentTime: t,
}
}
-func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
switch pf {
case compressionNone:
case compressionMade:
@@ -731,7 +789,10 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
}
if !haveCompressor {
- return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ if isServer {
+ return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ }
+ return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
}
default:
return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
@@ -741,104 +802,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
type payloadInfo struct {
compressedLength int // The compressed length received from the wire.
- uncompressedBytes []byte
+ uncompressedBytes mem.BufferSlice
+}
+
+func (p *payloadInfo) free() {
+ if p != nil && p.uncompressedBytes != nil {
+ p.uncompressedBytes.Free()
+ }
}
// recvAndDecompress reads a message from the stream, decompressing it if necessary.
//
// The caller owns the returned mem.BufferSlice and must call Free on it once the
// message is no longer needed, returning its buffers to the pool.
-func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor,
-) (uncompressedBuf []byte, cancel func(), err error) {
- pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize)
+// TODO: Refactor this function to reduce the number of arguments.
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
+func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
+) (out mem.BufferSlice, err error) {
+ pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
- return nil, nil, st.Err()
+ compressedLength := compressed.Len()
+
+ if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil {
+ compressed.Free()
+ return nil, st.Err()
}
var size int
- if pf == compressionMade {
+ if pf.isCompressed() {
+ defer compressed.Free()
+
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
if dc != nil {
- uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf))
+ var uncompressedBuf []byte
+ uncompressedBuf, err = dc.Do(compressed.Reader())
+ if err == nil {
+ out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)}
+ }
size = len(uncompressedBuf)
} else {
- uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize)
+ out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool)
}
if err != nil {
- return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
}
if size > maxReceiveMessageSize {
+ out.Free()
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
- return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
}
} else {
- uncompressedBuf = compressedBuf
+ out = compressed
}
if payInfo != nil {
- payInfo.compressedLength = len(compressedBuf)
- payInfo.uncompressedBytes = uncompressedBuf
-
- cancel = func() {}
- } else {
- cancel = func() {
- p.recvBufferPool.Put(&compressedBuf)
- }
+ payInfo.compressedLength = compressedLength
+ out.Ref()
+ payInfo.uncompressedBytes = out
}
- return uncompressedBuf, cancel, nil
+ return out, nil
}
// Using compressor, decompress d, returning data and size.
// If the decompressed payload exceeds maxReceiveMessageSize, the returned size
// will exceed the limit as well, and the caller is expected to reject the message.
-func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
- dcReader, err := compressor.Decompress(bytes.NewReader(d))
+func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) {
+ dcReader, err := compressor.Decompress(d.Reader())
if err != nil {
return nil, 0, err
}
- if sizer, ok := compressor.(interface {
- DecompressedSize(compressedBytes []byte) int
- }); ok {
- if size := sizer.DecompressedSize(d); size >= 0 {
- if size > maxReceiveMessageSize {
- return nil, size, nil
- }
- // size is used as an estimate to size the buffer, but we
- // will read more data if available.
- // +MinRead so ReadFrom will not reallocate if size is correct.
- //
- // TODO: If we ensure that the buffer size is the same as the DecompressedSize,
- // we can also utilize the recv buffer pool here.
- buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
- bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- return buf.Bytes(), int(bytesRead), err
- }
+
+ // TODO: Can/should this still be preserved with the new BufferSlice API? Are
+ // there any actual benefits to allocating a single large buffer instead of
+ // multiple smaller ones?
+ //if sizer, ok := compressor.(interface {
+ // DecompressedSize(compressedBytes []byte) int
+ //}); ok {
+ // if size := sizer.DecompressedSize(d); size >= 0 {
+ // if size > maxReceiveMessageSize {
+ // return nil, size, nil
+ // }
+ // // size is used as an estimate to size the buffer, but we
+ // // will read more data if available.
+ // // +MinRead so ReadFrom will not reallocate if size is correct.
+ // //
+ // // TODO: If we ensure that the buffer size is the same as the DecompressedSize,
+ // // we can also utilize the recv buffer pool here.
+ // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
+ // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+ // return buf.Bytes(), int(bytesRead), err
+ // }
+ //}
+
+ var out mem.BufferSlice
+ _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+ if err != nil {
+ out.Free()
+ return nil, 0, err
}
- // Read from LimitReader with limit max+1. So if the underlying
- // reader is over limit, the result will be bigger than max.
- d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- return d, len(d), err
+ return out, out.Len(), nil
}
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
- buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
+ data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
if err != nil {
return err
}
- defer cancel()
- if err := c.Unmarshal(buf, m); err != nil {
+ // If the codec wants its own reference to the data, it can get it. Otherwise, always
+ // free the buffers.
+ defer data.Free()
+
+ if err := c.Unmarshal(data, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
}
+
return nil
}
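
recvMsg still consumes the standard gRPC frame prefix: one compressed-flag byte followed by a big-endian uint32 payload length. A self-contained sketch of parsing that 5-byte prefix from any io.Reader:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "io"
    )

    // frame mirrors the gRPC length-prefixed message format described at
    // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md.
    type frame struct {
        compressed bool
        payload    []byte
    }

    func readFrame(r io.Reader, maxRecv int) (frame, error) {
        var hdr [5]byte
        if _, err := io.ReadFull(r, hdr[:]); err != nil {
            return frame{}, err
        }
        length := binary.BigEndian.Uint32(hdr[1:])
        if int(length) > maxRecv {
            return frame{}, fmt.Errorf("message of %d bytes exceeds max %d", length, maxRecv)
        }
        payload := make([]byte, length)
        if _, err := io.ReadFull(r, payload); err != nil {
            if err == io.EOF {
                err = io.ErrUnexpectedEOF // header promised more bytes
            }
            return frame{}, err
        }
        return frame{compressed: hdr[0] == 1, payload: payload}, nil
    }

    func main() {
        // Build one uncompressed frame by hand: flag 0, length 5, "hello".
        var buf bytes.Buffer
        buf.WriteByte(0)
        _ = binary.Write(&buf, binary.BigEndian, uint32(5))
        buf.WriteString("hello")

        f, err := readFrame(&buf, 1<<20)
        fmt.Println(f.compressed, string(f.payload), err)
    }
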
@@ -941,7 +1027,7 @@ func setCallInfoCodec(c *callInfo) error {
// encoding.Codec (Name vs. String method name). We only support
// setting content subtype from encoding.Codec to avoid a behavior
// change with the deprecated version.
- if ec, ok := c.codec.(encoding.Codec); ok {
+ if ec, ok := c.codec.(encoding.CodecV2); ok {
c.contentSubtype = strings.ToLower(ec.Name())
}
}
@@ -950,12 +1036,12 @@ func setCallInfoCodec(c *callInfo) error {
if c.contentSubtype == "" {
// No codec specified in CallOptions; use proto by default.
- c.codec = encoding.GetCodec(proto.Name)
+ c.codec = getCodec(proto.Name)
return nil
}
// c.contentSubtype is already lowercased in CallContentSubtype
- c.codec = encoding.GetCodec(c.contentSubtype)
+ c.codec = getCodec(c.contentSubtype)
if c.codec == nil {
return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
}
diff --git a/tools/vendor/google.golang.org/grpc/server.go b/tools/vendor/google.golang.org/grpc/server.go
index 89f8e4792..d1e1415a4 100644
--- a/tools/vendor/google.golang.org/grpc/server.go
+++ b/tools/vendor/google.golang.org/grpc/server.go
@@ -45,6 +45,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -80,7 +81,7 @@ func init() {
}
internal.BinaryLogger = binaryLogger
internal.JoinServerOptions = newJoinServerOption
- internal.RecvBufferPool = recvBufferPool
+ internal.BufferPool = bufferPool
}
var statusOK = status.New(codes.OK, "")
@@ -170,7 +171,7 @@ type serverOptions struct {
maxHeaderListSize *uint32
headerTableSize *uint32
numServerWorkers uint32
- recvBufferPool SharedBufferPool
+ bufferPool mem.BufferPool
waitForHandlers bool
}
@@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{
connectionTimeout: 120 * time.Second,
writeBufferSize: defaultWriteBufSize,
readBufferSize: defaultReadBufSize,
- recvBufferPool: nopBufferPool{},
+ bufferPool: mem.DefaultBufferPool(),
}
var globalServerOptions []ServerOption
@@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
// Will be supported throughout 1.x.
func CustomCodec(codec Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.codec = codec
+ o.codec = newCodecV0Bridge(codec)
})
}
@@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption {
// later release.
func ForceServerCodec(codec encoding.Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.codec = codec
+ o.codec = newCodecV1Bridge(codec)
+ })
+}
+
+// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
+// CodecV2 interface.
+//
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.codec = codecV2
})
}
@@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption {
})
}
-// RecvBufferPool returns a ServerOption that configures the server
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: StatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
- return recvBufferPool(bufferPool)
-}
-
-func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
+func bufferPool(bufferPool mem.BufferPool) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.recvBufferPool = bufferPool
+ o.bufferPool = bufferPool
})
}
@@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16
-// serverWorkers blocks on a *transport.Stream channel forever and waits for
+// serverWorker blocks on a *transport.Stream channel forever and waits for
// data to be fed by serveStreams. This allows multiple requests to be
// processed by the same goroutine, removing the need for expensive stack
// re-allocations (see the runtime.morestack problem [1]).
@@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
ChannelzParent: s.channelz,
MaxHeaderListSize: s.opts.maxHeaderListSize,
HeaderTableSize: s.opts.headerTableSize,
+ BufferPool: s.opts.bufferPool,
}
st, err := transport.NewServerTransport(c, config)
if err != nil {
@@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil)
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
+ st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
if err != nil {
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
@@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport,
channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
return err
}
- compData, err := compress(data, cp, comp)
+
+ compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
if err != nil {
+ data.Free()
channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
return err
}
- hdr, payload := msgHeader(data, compData)
+
+ hdr, payload := msgHeader(data, compData, pf)
+
+ defer func() {
+ compData.Free()
+ data.Free()
+ // payload does not need to be freed here, it is either data or compData, both of
+ // which are already freed.
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > s.opts.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+ if payloadLen > s.opts.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
}
err = t.Write(stream, hdr, payload, opts)
if err == nil {
- for _, sh := range s.opts.statsHandlers {
- sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
+ if len(s.opts.statsHandlers) != 0 {
+ for _, sh := range s.opts.statsHandlers {
+ sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
+ }
}
}
return err
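
sendResponse now frees data and compData itself after the transport write; payload is never freed directly because it always aliases one of the two. A sketch of that aliasing rule in isolation (the transport write is stubbed out):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/mem"
    )

    // send frames and "writes" a message, then releases every slice it owns.
    // payload always aliases data or compData, so freeing those two is
    // sufficient; freeing payload as well would double-free.
    func send(data, compData mem.BufferSlice, compressed bool) error {
        payload := data
        if compressed {
            payload = compData
        }
        defer func() {
            compData.Free() // no-op when compData is nil
            data.Free()
            // payload is deliberately not freed: it aliases one of the above.
        }()
        fmt.Printf("writing %d payload bytes\n", payload.Len())
        return nil
    }

    func main() {
        raw := []byte("response body")
        data := mem.BufferSlice{mem.NewBuffer(&raw, nil)}
        _ = send(data, nil, false)
    }
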
@@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
var payInfo *payloadInfo
if len(shs) != 0 || len(binlogs) != 0 {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+ d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
if err != nil {
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
return err
}
+ defer d.Free()
if channelz.IsOn() {
t.IncrMsgRecv()
}
df := func(v any) error {
- defer cancel()
-
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
+
for _, sh := range shs {
sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
- Length: len(d),
+ Length: d.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
- Data: d,
})
}
if len(binlogs) != 0 {
cm := &binarylog.ClientMessage{
- Message: d,
+ Message: d.Materialize(),
}
for _, binlog := range binlogs {
binlog.Log(ctx, cm)
@@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
ctx: ctx,
t: t,
s: stream,
- p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
+ p: &parser{r: stream, bufferPool: s.opts.bufferPool},
codec: s.getCodec(stream.ContentSubtype()),
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
@@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
return s.opts.codec
}
if contentSubtype == "" {
- return encoding.GetCodec(proto.Name)
+ return getCodec(proto.Name)
}
- codec := encoding.GetCodec(contentSubtype)
+ codec := getCodec(contentSubtype)
if codec == nil {
logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
- return encoding.GetCodec(proto.Name)
+ return getCodec(proto.Name)
}
return codec
}
diff --git a/tools/vendor/google.golang.org/grpc/shared_buffer_pool.go b/tools/vendor/google.golang.org/grpc/shared_buffer_pool.go
deleted file mode 100644
index 48a64cfe8..000000000
--- a/tools/vendor/google.golang.org/grpc/shared_buffer_pool.go
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import "sync"
-
-// SharedBufferPool is a pool of buffers that can be shared, resulting in
-// decreased memory allocation. Currently, in gRPC-go, it is only utilized
-// for parsing incoming messages.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type SharedBufferPool interface {
- // Get returns a buffer with specified length from the pool.
- //
- // The returned byte slice may be not zero initialized.
- Get(length int) []byte
-
- // Put returns a buffer to the pool.
- Put(*[]byte)
-}
-
-// NewSharedBufferPool creates a simple SharedBufferPool with buckets
-// of different sizes to optimize memory usage. This prevents the pool from
-// wasting large amounts of memory, even when handling messages of varying sizes.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func NewSharedBufferPool() SharedBufferPool {
- return &simpleSharedBufferPool{
- pools: [poolArraySize]simpleSharedBufferChildPool{
- newBytesPool(level0PoolMaxSize),
- newBytesPool(level1PoolMaxSize),
- newBytesPool(level2PoolMaxSize),
- newBytesPool(level3PoolMaxSize),
- newBytesPool(level4PoolMaxSize),
- newBytesPool(0),
- },
- }
-}
-
-// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
-type simpleSharedBufferPool struct {
- pools [poolArraySize]simpleSharedBufferChildPool
-}
-
-func (p *simpleSharedBufferPool) Get(size int) []byte {
- return p.pools[p.poolIdx(size)].Get(size)
-}
-
-func (p *simpleSharedBufferPool) Put(bs *[]byte) {
- p.pools[p.poolIdx(cap(*bs))].Put(bs)
-}
-
-func (p *simpleSharedBufferPool) poolIdx(size int) int {
- switch {
- case size <= level0PoolMaxSize:
- return level0PoolIdx
- case size <= level1PoolMaxSize:
- return level1PoolIdx
- case size <= level2PoolMaxSize:
- return level2PoolIdx
- case size <= level3PoolMaxSize:
- return level3PoolIdx
- case size <= level4PoolMaxSize:
- return level4PoolIdx
- default:
- return levelMaxPoolIdx
- }
-}
-
-const (
- level0PoolMaxSize = 16 // 16 B
- level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
- level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
- level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
- level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
-)
-
-const (
- level0PoolIdx = iota
- level1PoolIdx
- level2PoolIdx
- level3PoolIdx
- level4PoolIdx
- levelMaxPoolIdx
- poolArraySize
-)
-
-type simpleSharedBufferChildPool interface {
- Get(size int) []byte
- Put(any)
-}
-
-type bufferPool struct {
- sync.Pool
-
- defaultSize int
-}
-
-func (p *bufferPool) Get(size int) []byte {
- bs := p.Pool.Get().(*[]byte)
-
- if cap(*bs) < size {
- p.Pool.Put(bs)
-
- return make([]byte, size)
- }
-
- return (*bs)[:size]
-}
-
-func newBytesPool(size int) simpleSharedBufferChildPool {
- return &bufferPool{
- Pool: sync.Pool{
- New: func() any {
- bs := make([]byte, size)
- return &bs
- },
- },
- defaultSize: size,
- }
-}
-
-// nopBufferPool is a buffer pool just makes new buffer without pooling.
-type nopBufferPool struct {
-}
-
-func (nopBufferPool) Get(length int) []byte {
- return make([]byte, length)
-}
-
-func (nopBufferPool) Put(*[]byte) {
-}
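
The deleted SharedBufferPool is superseded by mem.BufferPool, which keeps the Get/Put shape but lives in the mem package. A sketch of a trivial conforming pool alongside the library constructors; the tier sizes passed to NewTieredBufferPool below are illustrative, not the library's actual buckets:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/mem"
    )

    // countingPool is a minimal mem.BufferPool that allocates fresh slices
    // and counts traffic; real pools recycle via sync.Pool much as the
    // deleted simpleSharedBufferPool did.
    type countingPool struct{ gets, puts int }

    func (p *countingPool) Get(length int) *[]byte {
        p.gets++
        b := make([]byte, length)
        return &b
    }

    func (p *countingPool) Put(*[]byte) { p.puts++ }

    func main() {
        var p countingPool
        var _ mem.BufferPool = &p // compile-time interface check

        buf := p.Get(4096)
        p.Put(buf)
        fmt.Printf("gets=%d puts=%d\n", p.gets, p.puts)

        // Library equivalents: mem.DefaultBufferPool() is the tiered pool
        // gRPC now uses by default, and NewTieredBufferPool lets callers
        // pick their own bucket sizes (these sizes are only an example).
        custom := mem.NewTieredBufferPool(256, 4<<10, 64<<10)
        b := custom.Get(1024)
        custom.Put(b)
    }
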
diff --git a/tools/vendor/google.golang.org/grpc/stats/stats.go b/tools/vendor/google.golang.org/grpc/stats/stats.go
index fdb0bd651..71195c494 100644
--- a/tools/vendor/google.golang.org/grpc/stats/stats.go
+++ b/tools/vendor/google.golang.org/grpc/stats/stats.go
@@ -77,9 +77,6 @@ type InPayload struct {
// the call to HandleRPC which provides the InPayload returns and must be
// copied if needed later.
Payload any
- // Data is the serialized message payload.
- // Deprecated: Data will be removed in the next release.
- Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
@@ -150,9 +147,6 @@ type OutPayload struct {
// the call to HandleRPC which provides the OutPayload returns and must be
// copied if needed later.
Payload any
- // Data is the serialized message payload.
- // Deprecated: Data will be removed in the next release.
- Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
Length int
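
With the deprecated Data fields gone, stats handlers can still observe payload sizes but no longer the serialized bytes. A sketch of a stats.Handler that relies solely on the remaining length fields:

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/stats"
    )

    // sizeHandler records payload sizes. It cannot inspect serialized bytes:
    // the InPayload.Data / OutPayload.Data fields removed above are gone.
    type sizeHandler struct{}

    func (sizeHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

    func (sizeHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
        switch p := s.(type) {
        case *stats.InPayload:
            log.Printf("recv: %d B uncompressed, %d B on the wire", p.Length, p.WireLength)
        case *stats.OutPayload:
            log.Printf("sent: %d B uncompressed, %d B on the wire", p.Length, p.WireLength)
        }
    }

    func (sizeHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

    func (sizeHandler) HandleConn(context.Context, stats.ConnStats) {}

    func main() {
        // Installed like any other stats handler, e.g. on a server.
        srv := grpc.NewServer(grpc.StatsHandler(sizeHandler{}))
        defer srv.Stop()
        log.Println("server configured with size-only stats handler")
    }
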
diff --git a/tools/vendor/google.golang.org/grpc/stream.go b/tools/vendor/google.golang.org/grpc/stream.go
index 8051ef5b5..bb2b2a216 100644
--- a/tools/vendor/google.golang.org/grpc/stream.go
+++ b/tools/vendor/google.golang.org/grpc/stream.go
@@ -41,6 +41,7 @@ import (
"google.golang.org/grpc/internal/serviceconfig"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
cs.attempt = a
return nil
}
- if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
+ if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
return nil, err
}
@@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error {
}
a.s = s
a.ctx = s.Context()
- a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
+ a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
@@ -566,10 +567,15 @@ type clientStream struct {
// place where we need to check if the attempt is nil.
attempt *csAttempt
// TODO(hedging): hedging will have multiple attempts simultaneously.
- committed bool // active attempt committed for retry?
- onCommit func()
- buffer []func(a *csAttempt) error // operations to replay on retry
- bufferSize int // current size of buffer
+ committed bool // active attempt committed for retry?
+ onCommit func()
+ replayBuffer []replayOp // operations to replay on retry
+ replayBufferSize int // current size of replayBuffer
+}
+
+type replayOp struct {
+ op func(a *csAttempt) error
+ cleanup func()
}
// csAttempt implements a single transport stream attempt within a
@@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() {
cs.onCommit()
}
cs.committed = true
- cs.buffer = nil
+ for _, op := range cs.replayBuffer {
+ if op.cleanup != nil {
+ op.cleanup()
+ }
+ }
+ cs.replayBuffer = nil
}
func (cs *clientStream) commitAttempt() {
@@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
// the stream is canceled.
return err
}
- // Note that the first op in the replay buffer always sets cs.attempt
+ // Note that the first op in replayBuffer always sets cs.attempt
// if it is able to pick a transport and create a stream.
if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
return nil
@@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
// already be status errors.
return toRPCErr(op(cs.attempt))
}
- if len(cs.buffer) == 0 {
+ if len(cs.replayBuffer) == 0 {
// For the first op, which controls creation of the stream and
// assigns cs.attempt, we need to create a new attempt inline
// before executing the first op. On subsequent ops, the attempt
@@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD {
}
func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
- for _, f := range cs.buffer {
- if err := f(attempt); err != nil {
+ for _, f := range cs.replayBuffer {
+ if err := f.op(attempt); err != nil {
return err
}
}
return nil
}
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
// Note: we still will buffer if retry is disabled (for transparent retries).
if cs.committed {
return
}
- cs.bufferSize += sz
- if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+ cs.replayBufferSize += sz
+ if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
cs.commitAttemptLocked()
+ cleanup()
return
}
- cs.buffer = append(cs.buffer, op)
+ cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
}
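
Each buffered retry op now carries a cleanup so that captured buffer references are released exactly once, whether the buffer is replayed, committed away, or overflows. A generic sketch of the op-plus-cleanup pairing with stand-in types:

    package main

    import "fmt"

    // replayOp pairs an action to replay on retry with a cleanup releasing
    // whatever the action captured (for gRPC, buffer references).
    type replayOp struct {
        op      func() error
        cleanup func()
    }

    type retryBuffer struct {
        ops      []replayOp
        size     int
        maxSize  int
        disabled bool
    }

    // add records op for future replays, or runs cleanup immediately if the
    // buffer has overflowed and retries are being abandoned.
    func (b *retryBuffer) add(sz int, op func() error, cleanup func()) {
        if b.disabled {
            return
        }
        b.size += sz
        if b.size > b.maxSize {
            b.discard() // commit: retries are off from now on
            if cleanup != nil {
                cleanup()
            }
            return
        }
        b.ops = append(b.ops, replayOp{op: op, cleanup: cleanup})
    }

    // discard drops the buffer, releasing every captured resource once.
    func (b *retryBuffer) discard() {
        b.disabled = true
        for _, o := range b.ops {
            if o.cleanup != nil {
                o.cleanup()
            }
        }
        b.ops = nil
    }

    func main() {
        b := &retryBuffer{maxSize: 8}
        b.add(4, func() error { return nil }, func() { fmt.Println("freed #1") })
        b.add(6, func() error { return nil }, func() { fmt.Println("freed #2") })
        // The second add overflowed: both cleanups have now run.
    }
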
func (cs *clientStream) SendMsg(m any) (err error) {
@@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > *cs.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+ if payloadLen > *cs.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
}
+
+ // always take an extra ref in case data == payload (i.e. when the data isn't
+ // compressed). The original ref will always be freed by the deferred free above.
+ payload.Ref()
op := func(a *csAttempt) error {
- return a.sendMsg(m, hdr, payload, data)
+ return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
+ }
+
+ // onSuccess is invoked when the op is captured for a subsequent retry. If the
+ // stream was established by a previous message and therefore retries are
+ // disabled, onSuccess will not be invoked, and the extra payload reference
+ // can be freed immediately.
+ onSuccessCalled := false
+ err = cs.withRetry(op, func() {
+ cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
+ onSuccessCalled = true
+ })
+ if !onSuccessCalled {
+ payload.Free()
}
- err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
if len(cs.binlogs) != 0 && err == nil {
cm := &binarylog.ClientMessage{
OnClientSide: true,
- Message: data,
+ Message: data.Materialize(),
}
for _, binlog := range cs.binlogs {
binlog.Log(cs.ctx, cm)
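
SendMsg takes an extra reference on payload up front because two owners may need it: the deferred free in the function itself and the retry buffer that might capture the op. A sketch of that reference-counting hand-off on a BufferSlice; the maybeRetain helper and retained list below are stand-ins for the retry buffer, not gRPC API:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/mem"
    )

    // retained simulates a retry buffer that may hold on to the payload;
    // when it does, it owns one reference and frees it later via the stored
    // cleanup.
    var retained []func()

    func maybeRetain(free func(), keep bool) bool {
        if keep {
            retained = append(retained, free)
        }
        return keep
    }

    func send(payload mem.BufferSlice, buffer bool) {
        defer payload.Free() // reference #1: ours, always released on return

        payload.Ref() // reference #2: for whoever may retain the payload
        if !maybeRetain(payload.Free, buffer) {
            payload.Free() // nobody kept it; drop reference #2 immediately
        }
        fmt.Printf("wrote %d bytes\n", payload.Len()) // valid: ref #1 still held
    }

    func main() {
        raw := []byte("request payload")
        send(mem.BufferSlice{mem.NewBuffer(&raw, nil)}, true)
        for _, free := range retained {
            free() // retry finished; release the buffered reference
        }
    }
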
@@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error {
var recvInfo *payloadInfo
if len(cs.binlogs) != 0 {
recvInfo = &payloadInfo{}
+ defer recvInfo.free()
}
err := cs.withRetry(func(a *csAttempt) error {
return a.recvMsg(m, recvInfo)
@@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error {
if len(cs.binlogs) != 0 && err == nil {
sm := &binarylog.ServerMessage{
OnClientSide: true,
- Message: recvInfo.uncompressedBytes,
+ Message: recvInfo.uncompressedBytes.Materialize(),
}
for _, binlog := range cs.binlogs {
binlog.Log(cs.ctx, sm)
@@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error {
// RecvMsg. This also matches historical behavior.
return nil
}
- cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+ cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
if len(cs.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{
OnClientSide: true,
@@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) {
cs.cancel()
}
-func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
+func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
cs := a.cs
if a.trInfo != nil {
a.mu.Lock()
@@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
}
return io.EOF
}
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
+ if len(a.statsHandlers) != 0 {
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
+ }
}
if channelz.IsOn() {
a.t.IncrMsgSent()
@@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
if len(a.statsHandlers) != 0 && payInfo == nil {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
if !a.decompSet {
@@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
// Only initialize this state once per stream.
a.decompSet = true
}
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
- if err != nil {
+ if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil {
if err == io.EOF {
if statusErr := a.s.Status().Err(); statusErr != nil {
return statusErr
@@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
for _, sh := range a.statsHandlers {
sh.HandleRPC(a.ctx, &stats.InPayload{
- Client: true,
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
+ Client: true,
+ RecvTime: time.Now(),
+ Payload: m,
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
- Length: len(payInfo.uncompressedBytes),
+ Length: payInfo.uncompressedBytes.Len(),
})
}
if channelz.IsOn() {
@@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
- }
- if err == io.EOF {
+ if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
return a.s.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
}
- return toRPCErr(err)
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
}
func (a *csAttempt) finish(err error) {
@@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) {
a.mu.Unlock()
}
-// newClientStream creates a ClientStream with the specified transport, on the
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
// given addrConn.
//
// It's expected that the given transport is either the same one in addrConn, or
// is already closed. To avoid race, transport is specified separately, instead
-// of using ac.transpot.
+// of using ac.transport.
//
// Main difference between this and ClientConn.NewStream:
// - no retry
@@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.s = s
- as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
+ as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
// TODO(dfawley): should we be checking len(data) instead?
- if len(payld) > *as.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+ if payload.Len() > *as.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
}
- if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+ if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
if !as.desc.ClientStreams {
// For non-client-streaming RPCs, we return nil instead of EOF on error
// because the generated code requires it. finish is not called; RecvMsg()
@@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Only initialize this state once per stream.
as.decompSet = true
}
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err != nil {
+ if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
if err == io.EOF {
if statusErr := as.s.Status().Err(); statusErr != nil {
return statusErr
@@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
- }
- if err == io.EOF {
+ if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
return as.s.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
}
- return toRPCErr(err)
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
}
func (as *addrConnStream) finish(err error) {
@@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
+
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > ss.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+ if payloadLen > ss.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
}
if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
+
if len(ss.binlogs) != 0 {
if !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
@@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
}
sm := &binarylog.ServerMessage{
- Message: data,
+ Message: data.Materialize(),
}
for _, binlog := range ss.binlogs {
binlog.Log(ss.ctx, sm)
@@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+ sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
}
return nil
@@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
var payInfo *payloadInfo
if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
+ if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
sh.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
- Length: len(payInfo.uncompressedBytes),
+ RecvTime: time.Now(),
+ Payload: m,
+ Length: payInfo.uncompressedBytes.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
})
@@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
- Message: payInfo.uncompressedBytes,
+ Message: payInfo.uncompressedBytes.Materialize(),
}
for _, binlog := range ss.binlogs {
binlog.Log(ss.ctx, cm)
@@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
return Method(stream.Context())
}
-// prepareMsg returns the hdr, payload and data
-// using the compressors passed or using the
-// passed preparedmsg
-func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+// prepareMsg returns the hdr, payload and data using the compressors passed or
+// using the passed preparedmsg. The returned payloadFormat indicates whether
+// compression was applied and therefore whether the payload needs to be freed
+// in addition to the returned data. Freeing the payload when pf.isCompressed()
+// reports false can lead to undefined behavior.
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
if preparedMsg, ok := m.(*PreparedMsg); ok {
- return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+ return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
}
// The input interface is not a prepared msg.
// Marshal and Compress the data at this point
data, err = encode(codec, m)
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, nil, 0, err
}
- compData, err := compress(data, cp, comp)
+ compData, pf, err := compress(data, cp, comp, pool)
if err != nil {
- return nil, nil, nil, err
+ data.Free()
+ return nil, nil, nil, 0, err
}
- hdr, payload = msgHeader(data, compData)
- return hdr, payload, data, nil
+ hdr, payload = msgHeader(data, compData, pf)
+ return hdr, data, payload, pf, nil
}
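
The new `prepareMsg` contract is subtle: `data` must always be freed, while `payload` aliases `data` unless compression produced a separate copy. Below is a minimal, self-contained sketch of that ownership rule using toy stand-ins for `mem.BufferSlice` and `payloadFormat` (the real types live in `google.golang.org/grpc/mem` and gRPC's internal codec plumbing); it illustrates the pattern both `SendMsg` paths above follow, not gRPC's actual implementation.

```go
package main

import "fmt"

// bufferSlice and payloadFormat are hypothetical stand-ins for the real types.
type bufferSlice struct{ name string }

func (b *bufferSlice) Free() { fmt.Println("freed", b.name) }

type payloadFormat byte

const (
	compressionNone payloadFormat = 0
	compressionMade payloadFormat = 1
)

func (pf payloadFormat) isCompressed() bool { return pf == compressionMade }

func send(compressed bool) {
	data := &bufferSlice{name: "data"}
	payload := data // uncompressed: payload aliases data
	pf := compressionNone
	if compressed {
		payload = &bufferSlice{name: "payload"} // distinct compressed copy
		pf = compressionMade
	}
	defer func() {
		// Always free data; free payload only when it is a separate set of
		// buffers, otherwise a second Free would double-release the pool.
		data.Free()
		if pf.isCompressed() {
			payload.Free()
		}
	}()
	// ... write hdr+payload to the transport here ...
}

func main() {
	send(false) // frees only data
	send(true)  // frees data and payload
}
```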
diff --git a/tools/vendor/google.golang.org/grpc/stream_interfaces.go b/tools/vendor/google.golang.org/grpc/stream_interfaces.go
index 8b813529c..0037fee0b 100644
--- a/tools/vendor/google.golang.org/grpc/stream_interfaces.go
+++ b/tools/vendor/google.golang.org/grpc/stream_interfaces.go
@@ -22,15 +22,35 @@ package grpc
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
type ServerStreamingClient[Res any] interface {
+ // Recv receives the next response message from the server. The client may
+ // repeatedly call Recv to read messages from the response stream. If
+ // io.EOF is returned, the stream has terminated with an OK status. Any
+ // other error is compatible with the status package and indicates the
+ // RPC's status code and message.
Recv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, and Trailer
+ // functionality. No other methods in the ClientStream should be called
+ // directly.
ClientStream
}
// ServerStreamingServer represents the server side of a server-streaming (one
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
+//
+// To terminate the response stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
type ServerStreamingServer[Res any] interface {
+ // Send sends a response message to the client. The server handler may
+ // call Send multiple times to send multiple messages to the client. An
+ // error is returned if the stream was terminated unexpectedly, and the
+ // handler method should return, as the stream is no longer usable.
Send(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
@@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface {
// message stream and the type of the unary response message. It is used in
// generated code.
type ClientStreamingClient[Req any, Res any] interface {
+ // Send sends a request message to the server. The client may call Send
+ // multiple times to send multiple messages to the server. On error, Send
+ // aborts the stream. If the error was generated by the client, the status
+ // is returned directly. Otherwise, io.EOF is returned, and the status of
+ // the stream may be discovered using CloseAndRecv().
Send(*Req) error
+
+ // CloseAndRecv closes the request stream and waits for the server's
+ // response. This method must be called once and only once after sending
+ // all request messages. Any error returned is implemented by the status
+ // package.
CloseAndRecv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, and Trailer
+ // functionality. No other methods in the ClientStream should be called
+ // directly.
ClientStream
}
@@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface {
// requests, one response) RPC. It is generic over both the type of the request
// message stream and the type of the unary response message. It is used in
// generated code.
+//
+// To terminate the RPC, call SendAndClose and return nil from the method
+// handler or do not call SendAndClose and return an error from the status
+// package.
type ClientStreamingServer[Req any, Res any] interface {
+ // Recv receives the next request message from the client. The server may
+ // repeatedly call Recv to read messages from the request stream. If
+ // io.EOF is returned, it indicates the client called CloseAndRecv on its
+ // ClientStreamingClient. Any other error indicates the stream was
+ // terminated unexpectedly, and the handler method should return, as the
+ // stream is no longer usable.
Recv() (*Req, error)
+
+ // SendAndClose sends a single response message to the client and closes
+ // the stream. This method must be called once and only once after all
+ // request messages have been processed. Recv should not be called after
+ // calling SendAndClose.
SendAndClose(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
@@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface {
// request message stream and the type of the response message stream. It is
// used in generated code.
type BidiStreamingClient[Req any, Res any] interface {
+ // Send sends a request message to the server. The client may call Send
+ // multiple times to send multiple messages to the server. On error, Send
+ // aborts the stream. If the error was generated by the client, the status
+ // is returned directly. Otherwise, io.EOF is returned, and the status of
+ // the stream may be discovered using Recv().
Send(*Req) error
+
+ // Recv receives the next response message from the server. The client may
+ // repeatedly call Recv to read messages from the response stream. If
+ // io.EOF is returned, the stream has terminated with an OK status. Any
+ // other error is compatible with the status package and indicates the
+ // RPC's status code and message.
Recv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, Trailer, and
+ // CloseSend functionality. No other methods in the ClientStream should be
+ // called directly.
ClientStream
}
@@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface {
// (many requests, many responses) RPC. It is generic over both the type of the
// request message stream and the type of the response message stream. It is
// used in generated code.
+//
+// To terminate the stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
type BidiStreamingServer[Req any, Res any] interface {
+ // Recv receives the next request message from the client. The server may
+ // repeatedly call Recv to read messages from the request stream. If
+ // io.EOF is returned, it indicates the client called CloseSend on its
+ // BidiStreamingClient. Any other error indicates the stream was
+ // terminated unexpectedly, and the handler method should return, as the
+ // stream is no longer usable.
Recv() (*Req, error)
+
+ // Send sends a response message to the client. The server handler may
+ // call Send multiple times to send multiple messages to the client. An
+ // error is returned if the stream was terminated unexpectedly, and the
+ // handler method should return, as the stream is no longer usable.
Send(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
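
The comments added above spell out a usage contract; as a concrete illustration, here is a hedged sketch of a client-streaming handler that follows it: `Recv` until `io.EOF` (the client called `CloseAndRecv`), then reply exactly once with `SendAndClose`. `SumRequest`, `SumResponse`, and the `Sum` method are hypothetical placeholders for protoc-generated types; only `grpc.ClientStreamingServer` itself comes from the interfaces documented above (available since grpc-go 1.64).

```go
package example

import (
	"io"

	"google.golang.org/grpc"
)

// SumRequest and SumResponse are hypothetical stand-ins for generated messages.
type SumRequest struct{ Value int64 }
type SumResponse struct{ Total int64 }

type sumServer struct{}

// Sum reads the full request stream, then replies exactly once.
func (s *sumServer) Sum(stream grpc.ClientStreamingServer[SumRequest, SumResponse]) error {
	var total int64
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			// io.EOF means the client called CloseAndRecv: answer once and
			// terminate the RPC with an OK status.
			return stream.SendAndClose(&SumResponse{Total: total})
		}
		if err != nil {
			// The stream broke unexpectedly; return so gRPC tears it down.
			return err
		}
		total += req.Value
	}
}
```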
diff --git a/tools/vendor/google.golang.org/grpc/version.go b/tools/vendor/google.golang.org/grpc/version.go
index bafaef99b..5a47094ae 100644
--- a/tools/vendor/google.golang.org/grpc/version.go
+++ b/tools/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.65.0"
+const Version = "1.68.1"
diff --git a/tools/vendor/gopkg.in/ini.v1/.editorconfig b/tools/vendor/gopkg.in/ini.v1/.editorconfig
deleted file mode 100644
index 4a2d9180f..000000000
--- a/tools/vendor/gopkg.in/ini.v1/.editorconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-# http://editorconfig.org
-
-root = true
-
-[*]
-charset = utf-8
-end_of_line = lf
-insert_final_newline = true
-trim_trailing_whitespace = true
-
-[*_test.go]
-trim_trailing_whitespace = false
diff --git a/tools/vendor/gopkg.in/ini.v1/.gitignore b/tools/vendor/gopkg.in/ini.v1/.gitignore
deleted file mode 100644
index 588388bda..000000000
--- a/tools/vendor/gopkg.in/ini.v1/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-testdata/conf_out.ini
-ini.sublime-project
-ini.sublime-workspace
-testdata/conf_reflect.ini
-.idea
-/.vscode
-.DS_Store
diff --git a/tools/vendor/gopkg.in/ini.v1/.golangci.yml b/tools/vendor/gopkg.in/ini.v1/.golangci.yml
deleted file mode 100644
index 631e36925..000000000
--- a/tools/vendor/gopkg.in/ini.v1/.golangci.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-linters-settings:
- staticcheck:
- checks: [
- "all",
- "-SA1019" # There are valid use cases of strings.Title
- ]
- nakedret:
- max-func-lines: 0 # Disallow any unnamed return statement
-
-linters:
- enable:
- - deadcode
- - errcheck
- - gosimple
- - govet
- - ineffassign
- - staticcheck
- - structcheck
- - typecheck
- - unused
- - varcheck
- - nakedret
- - gofmt
- - rowserrcheck
- - unconvert
- - goimports
- - unparam
diff --git a/tools/vendor/gopkg.in/ini.v1/LICENSE b/tools/vendor/gopkg.in/ini.v1/LICENSE
deleted file mode 100644
index d361bbcdf..000000000
--- a/tools/vendor/gopkg.in/ini.v1/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright 2014 Unknwon
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/tools/vendor/gopkg.in/ini.v1/Makefile b/tools/vendor/gopkg.in/ini.v1/Makefile
deleted file mode 100644
index f3b0dae2d..000000000
--- a/tools/vendor/gopkg.in/ini.v1/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-.PHONY: build test bench vet coverage
-
-build: vet bench
-
-test:
- go test -v -cover -race
-
-bench:
- go test -v -cover -test.bench=. -test.benchmem
-
-vet:
- go vet
-
-coverage:
- go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
diff --git a/tools/vendor/gopkg.in/ini.v1/README.md b/tools/vendor/gopkg.in/ini.v1/README.md
deleted file mode 100644
index 30606d970..000000000
--- a/tools/vendor/gopkg.in/ini.v1/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# INI
-
-[GitHub Workflow Status](https://github.com/go-ini/ini/actions?query=branch%3Amain)
-[Codecov](https://codecov.io/gh/go-ini/ini)
-[GoDoc](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
-[Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini)
-
-
-
-Package ini provides INI file read and write functionality in Go.
-
-## Features
-
-- Load from multiple data sources (file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
-- Read with recursion values.
-- Read with parent-child sections.
-- Read with auto-increment key names.
-- Read with multiple-line values.
-- Read with tons of helper methods.
-- Read and convert values to Go types.
-- Read and **WRITE** comments of sections and keys.
-- Manipulate sections, keys and comments with ease.
-- Keep sections and keys in order as you parse and save.
-
-## Installation
-
-The minimum requirement of Go is **1.13**.
-
-```sh
-$ go get gopkg.in/ini.v1
-```
-
-Please add `-u` flag to update in the future.
-
-## Getting Help
-
-- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
-- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
-- 中国大陆镜像:https://ini.unknwon.cn
-
-## License
-
-This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/tools/vendor/gopkg.in/ini.v1/codecov.yml b/tools/vendor/gopkg.in/ini.v1/codecov.yml
deleted file mode 100644
index e02ec84bc..000000000
--- a/tools/vendor/gopkg.in/ini.v1/codecov.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-coverage:
- range: "60...95"
- status:
- project:
- default:
- threshold: 1%
- informational: true
- patch:
- default:
- only_pulls: true
- informational: true
-
-comment:
- layout: 'diff'
-
-github_checks: false
diff --git a/tools/vendor/gopkg.in/ini.v1/data_source.go b/tools/vendor/gopkg.in/ini.v1/data_source.go
deleted file mode 100644
index c3a541f1d..000000000
--- a/tools/vendor/gopkg.in/ini.v1/data_source.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2019 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "os"
-)
-
-var (
- _ dataSource = (*sourceFile)(nil)
- _ dataSource = (*sourceData)(nil)
- _ dataSource = (*sourceReadCloser)(nil)
-)
-
-// dataSource is an interface that returns object which can be read and closed.
-type dataSource interface {
- ReadCloser() (io.ReadCloser, error)
-}
-
-// sourceFile represents an object that contains content on the local file system.
-type sourceFile struct {
- name string
-}
-
-func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
- return os.Open(s.name)
-}
-
-// sourceData represents an object that contains content in memory.
-type sourceData struct {
- data []byte
-}
-
-func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
- return ioutil.NopCloser(bytes.NewReader(s.data)), nil
-}
-
-// sourceReadCloser represents an input stream with Close method.
-type sourceReadCloser struct {
- reader io.ReadCloser
-}
-
-func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
- return s.reader, nil
-}
-
-func parseDataSource(source interface{}) (dataSource, error) {
- switch s := source.(type) {
- case string:
- return sourceFile{s}, nil
- case []byte:
- return &sourceData{s}, nil
- case io.ReadCloser:
- return &sourceReadCloser{s}, nil
- case io.Reader:
- return &sourceReadCloser{ioutil.NopCloser(s)}, nil
- default:
- return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
- }
-}
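
For reference, this is how the type switch in `parseDataSource` surfaces to users of the package: `ini.Load` accepts any mix of the four source kinds, with later sources overwriting earlier ones. A minimal sketch (the `app_mode`/`paths` content is made up for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"gopkg.in/ini.v1"
)

func main() {
	// Each argument goes through parseDataSource: a string becomes a
	// sourceFile, a []byte a sourceData, and an io.Reader is wrapped in a
	// NopCloser-backed sourceReadCloser.
	cfg, err := ini.Load(
		[]byte("app_mode = development"),
		strings.NewReader("[paths]\ndata = /home/git/grafana\n"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Section("").Key("app_mode").String())  // development
	fmt.Println(cfg.Section("paths").Key("data").String()) // /home/git/grafana
}
```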
diff --git a/tools/vendor/gopkg.in/ini.v1/deprecated.go b/tools/vendor/gopkg.in/ini.v1/deprecated.go
deleted file mode 100644
index 48b8e66d6..000000000
--- a/tools/vendor/gopkg.in/ini.v1/deprecated.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-var (
- // Deprecated: Use "DefaultSection" instead.
- DEFAULT_SECTION = DefaultSection
- // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
- AllCapsUnderscore = SnackCase
-)
diff --git a/tools/vendor/gopkg.in/ini.v1/error.go b/tools/vendor/gopkg.in/ini.v1/error.go
deleted file mode 100644
index f66bc94b8..000000000
--- a/tools/vendor/gopkg.in/ini.v1/error.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "fmt"
-)
-
-// ErrDelimiterNotFound indicates that no key-value delimiter was found on a line where there should be one.
-type ErrDelimiterNotFound struct {
- Line string
-}
-
-// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
-func IsErrDelimiterNotFound(err error) bool {
- _, ok := err.(ErrDelimiterNotFound)
- return ok
-}
-
-func (err ErrDelimiterNotFound) Error() string {
- return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
-}
-
-// ErrEmptyKeyName indicates that no key name was found on a line where there should be one.
-type ErrEmptyKeyName struct {
- Line string
-}
-
-// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName.
-func IsErrEmptyKeyName(err error) bool {
- _, ok := err.(ErrEmptyKeyName)
- return ok
-}
-
-func (err ErrEmptyKeyName) Error() string {
- return fmt.Sprintf("empty key name: %s", err.Line)
-}
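
A short sketch of how these sentinel-style errors are meant to be consumed; it assumes the default `LoadOptions`, where a line without any `=:` delimiter fails the parse (options such as `AllowBooleanKeys` or `SkipUnrecognizableLines` would change that):

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// A line with no "=" or ":" should make the parser return
	// ErrDelimiterNotFound, which the IsErr* helpers above let callers
	// detect without a type assertion.
	_, err := ini.Load([]byte("this line has no delimiter"))
	if ini.IsErrDelimiterNotFound(err) {
		fmt.Println("parse failed:", err)
	}
}
```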
diff --git a/tools/vendor/gopkg.in/ini.v1/file.go b/tools/vendor/gopkg.in/ini.v1/file.go
deleted file mode 100644
index f8b22408b..000000000
--- a/tools/vendor/gopkg.in/ini.v1/file.go
+++ /dev/null
@@ -1,541 +0,0 @@
-// Copyright 2017 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "strings"
- "sync"
-)
-
-// File represents a combination of one or more INI files in memory.
-type File struct {
- options LoadOptions
- dataSources []dataSource
-
- // Should make things safe, but sometimes doesn't matter.
- BlockMode bool
- lock sync.RWMutex
-
- // To keep data in order.
- sectionList []string
- // To keep track of the index of a section with same name.
- // This meta list is only used when non-unique section names are allowed.
- sectionIndexes []int
-
- // Actual data is stored here.
- sections map[string][]*Section
-
- NameMapper
- ValueMapper
-}
-
-// newFile initializes File object with given data sources.
-func newFile(dataSources []dataSource, opts LoadOptions) *File {
- if len(opts.KeyValueDelimiters) == 0 {
- opts.KeyValueDelimiters = "=:"
- }
- if len(opts.KeyValueDelimiterOnWrite) == 0 {
- opts.KeyValueDelimiterOnWrite = "="
- }
- if len(opts.ChildSectionDelimiter) == 0 {
- opts.ChildSectionDelimiter = "."
- }
-
- return &File{
- BlockMode: true,
- dataSources: dataSources,
- sections: make(map[string][]*Section),
- options: opts,
- }
-}
-
-// Empty returns an empty file object.
-func Empty(opts ...LoadOptions) *File {
- var opt LoadOptions
- if len(opts) > 0 {
- opt = opts[0]
- }
-
- // Ignore error here, we are sure our data is good.
- f, _ := LoadSources(opt, []byte(""))
- return f
-}
-
-// NewSection creates a new section.
-func (f *File) NewSection(name string) (*Section, error) {
- if len(name) == 0 {
- return nil, errors.New("empty section name")
- }
-
- if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection {
- name = strings.ToLower(name)
- }
-
- if f.BlockMode {
- f.lock.Lock()
- defer f.lock.Unlock()
- }
-
- if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) {
- return f.sections[name][0], nil
- }
-
- f.sectionList = append(f.sectionList, name)
-
- // NOTE: Append to indexes must happen before appending to sections,
- // otherwise index will have off-by-one problem.
- f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name]))
-
- sec := newSection(f, name)
- f.sections[name] = append(f.sections[name], sec)
-
- return sec, nil
-}
-
-// NewRawSection creates a new section with an unparseable body.
-func (f *File) NewRawSection(name, body string) (*Section, error) {
- section, err := f.NewSection(name)
- if err != nil {
- return nil, err
- }
-
- section.isRawSection = true
- section.rawBody = body
- return section, nil
-}
-
-// NewSections creates a list of sections.
-func (f *File) NewSections(names ...string) (err error) {
- for _, name := range names {
- if _, err = f.NewSection(name); err != nil {
- return err
- }
- }
- return nil
-}
-
-// GetSection returns section by given name.
-func (f *File) GetSection(name string) (*Section, error) {
- secs, err := f.SectionsByName(name)
- if err != nil {
- return nil, err
- }
-
- return secs[0], err
-}
-
-// HasSection returns true if the file contains a section with given name.
-func (f *File) HasSection(name string) bool {
- section, _ := f.GetSection(name)
- return section != nil
-}
-
-// SectionsByName returns all sections with given name.
-func (f *File) SectionsByName(name string) ([]*Section, error) {
- if len(name) == 0 {
- name = DefaultSection
- }
- if f.options.Insensitive || f.options.InsensitiveSections {
- name = strings.ToLower(name)
- }
-
- if f.BlockMode {
- f.lock.RLock()
- defer f.lock.RUnlock()
- }
-
- secs := f.sections[name]
- if len(secs) == 0 {
- return nil, fmt.Errorf("section %q does not exist", name)
- }
-
- return secs, nil
-}
-
-// Section assumes the named section exists and creates a new, empty section when it does not.
-func (f *File) Section(name string) *Section {
- sec, err := f.GetSection(name)
- if err != nil {
- if name == "" {
- name = DefaultSection
- }
- sec, _ = f.NewSection(name)
- return sec
- }
- return sec
-}
-
-// SectionWithIndex assumes named section exists and returns a new section when not.
-func (f *File) SectionWithIndex(name string, index int) *Section {
- secs, err := f.SectionsByName(name)
- if err != nil || len(secs) <= index {
- // NOTE: It's OK here because the only possible error is empty section name,
- // but if it's empty, this piece of code won't be executed.
- newSec, _ := f.NewSection(name)
- return newSec
- }
-
- return secs[index]
-}
-
-// Sections returns a list of Section stored in the current instance.
-func (f *File) Sections() []*Section {
- if f.BlockMode {
- f.lock.RLock()
- defer f.lock.RUnlock()
- }
-
- sections := make([]*Section, len(f.sectionList))
- for i, name := range f.sectionList {
- sections[i] = f.sections[name][f.sectionIndexes[i]]
- }
- return sections
-}
-
-// ChildSections returns a list of child sections of given section name.
-func (f *File) ChildSections(name string) []*Section {
- return f.Section(name).ChildSections()
-}
-
-// SectionStrings returns list of section names.
-func (f *File) SectionStrings() []string {
- list := make([]string, len(f.sectionList))
- copy(list, f.sectionList)
- return list
-}
-
-// DeleteSection deletes a section or all sections with given name.
-func (f *File) DeleteSection(name string) {
- secs, err := f.SectionsByName(name)
- if err != nil {
- return
- }
-
- for i := 0; i < len(secs); i++ {
- // For non-unique sections, it is always necessary to remove the first one so
- // that in the next iteration the subsequent section continues to have index 0.
- // Ignoring the error as index 0 never returns an error.
- _ = f.DeleteSectionWithIndex(name, 0)
- }
-}
-
-// DeleteSectionWithIndex deletes a section with given name and index.
-func (f *File) DeleteSectionWithIndex(name string, index int) error {
- if !f.options.AllowNonUniqueSections && index != 0 {
- return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled")
- }
-
- if len(name) == 0 {
- name = DefaultSection
- }
- if f.options.Insensitive || f.options.InsensitiveSections {
- name = strings.ToLower(name)
- }
-
- if f.BlockMode {
- f.lock.Lock()
- defer f.lock.Unlock()
- }
-
- // Count occurrences of the sections
- occurrences := 0
-
- sectionListCopy := make([]string, len(f.sectionList))
- copy(sectionListCopy, f.sectionList)
-
- for i, s := range sectionListCopy {
- if s != name {
- continue
- }
-
- if occurrences == index {
- if len(f.sections[name]) <= 1 {
- delete(f.sections, name) // The last one in the map
- } else {
- f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...)
- }
-
- // Fix section lists
- f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
- f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...)
-
- } else if occurrences > index {
- // Fix the indices of all following sections with this name.
- f.sectionIndexes[i-1]--
- }
-
- occurrences++
- }
-
- return nil
-}
-
-func (f *File) reload(s dataSource) error {
- r, err := s.ReadCloser()
- if err != nil {
- return err
- }
- defer r.Close()
-
- return f.parse(r)
-}
-
-// Reload reloads and parses all data sources.
-func (f *File) Reload() (err error) {
- for _, s := range f.dataSources {
- if err = f.reload(s); err != nil {
- // In loose mode, we create an empty default section for nonexistent files.
- if os.IsNotExist(err) && f.options.Loose {
- _ = f.parse(bytes.NewBuffer(nil))
- continue
- }
- return err
- }
- if f.options.ShortCircuit {
- return nil
- }
- }
- return nil
-}
-
-// Append appends one or more data sources and reloads automatically.
-func (f *File) Append(source interface{}, others ...interface{}) error {
- ds, err := parseDataSource(source)
- if err != nil {
- return err
- }
- f.dataSources = append(f.dataSources, ds)
- for _, s := range others {
- ds, err = parseDataSource(s)
- if err != nil {
- return err
- }
- f.dataSources = append(f.dataSources, ds)
- }
- return f.Reload()
-}
-
-func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
- equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight
-
- if PrettyFormat || PrettyEqual {
- equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite)
- }
-
- // Use buffer to make sure target is safe until finish encoding.
- buf := bytes.NewBuffer(nil)
- lastSectionIdx := len(f.sectionList) - 1
- for i, sname := range f.sectionList {
- sec := f.SectionWithIndex(sname, f.sectionIndexes[i])
- if len(sec.Comment) > 0 {
- // Support multiline comments
- lines := strings.Split(sec.Comment, LineBreak)
- for i := range lines {
- if lines[i][0] != '#' && lines[i][0] != ';' {
- lines[i] = "; " + lines[i]
- } else {
- lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
- }
-
- if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
- return nil, err
- }
- }
- }
-
- if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) {
- if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
- return nil, err
- }
- } else {
- // Write nothing if default section is empty
- if len(sec.keyList) == 0 {
- continue
- }
- }
-
- isLastSection := i == lastSectionIdx
- if sec.isRawSection {
- if _, err := buf.WriteString(sec.rawBody); err != nil {
- return nil, err
- }
-
- if PrettySection && !isLastSection {
- // Put a line between sections
- if _, err := buf.WriteString(LineBreak); err != nil {
- return nil, err
- }
- }
- continue
- }
-
- // Count and generate alignment length and buffer spaces using the
- // longest key. Keys may be modified if they contain certain characters so
- // we need to take that into account in our calculation.
- alignLength := 0
- if PrettyFormat {
- for _, kname := range sec.keyList {
- keyLength := len(kname)
- // First case will surround key by ` and second by """
- if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
- keyLength += 2
- } else if strings.Contains(kname, "`") {
- keyLength += 6
- }
-
- if keyLength > alignLength {
- alignLength = keyLength
- }
- }
- }
- alignSpaces := bytes.Repeat([]byte(" "), alignLength)
-
- KeyList:
- for _, kname := range sec.keyList {
- key := sec.Key(kname)
- if len(key.Comment) > 0 {
- if len(indent) > 0 && sname != DefaultSection {
- buf.WriteString(indent)
- }
-
- // Support multiline comments
- lines := strings.Split(key.Comment, LineBreak)
- for i := range lines {
- if lines[i][0] != '#' && lines[i][0] != ';' {
- lines[i] = "; " + strings.TrimSpace(lines[i])
- } else {
- lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
- }
-
- if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
- return nil, err
- }
- }
- }
-
- if len(indent) > 0 && sname != DefaultSection {
- buf.WriteString(indent)
- }
-
- switch {
- case key.isAutoIncrement:
- kname = "-"
- case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
- kname = "`" + kname + "`"
- case strings.Contains(kname, "`"):
- kname = `"""` + kname + `"""`
- }
-
- writeKeyValue := func(val string) (bool, error) {
- if _, err := buf.WriteString(kname); err != nil {
- return false, err
- }
-
- if key.isBooleanType {
- buf.WriteString(LineBreak)
- return true, nil
- }
-
- // Write out alignment spaces before "=" sign
- if PrettyFormat {
- buf.Write(alignSpaces[:alignLength-len(kname)])
- }
-
- // In case key value contains "\n", "`", "\"", "#" or ";"
- if strings.ContainsAny(val, "\n`") {
- val = `"""` + val + `"""`
- } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
- val = "`" + val + "`"
- } else if len(strings.TrimSpace(val)) != len(val) {
- val = `"` + val + `"`
- }
- if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
- return false, err
- }
- return false, nil
- }
-
- shadows := key.ValueWithShadows()
- if len(shadows) == 0 {
- if _, err := writeKeyValue(""); err != nil {
- return nil, err
- }
- }
-
- for _, val := range shadows {
- exitLoop, err := writeKeyValue(val)
- if err != nil {
- return nil, err
- } else if exitLoop {
- continue KeyList
- }
- }
-
- for _, val := range key.nestedValues {
- if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
- return nil, err
- }
- }
- }
-
- if PrettySection && !isLastSection {
- // Put a line between sections
- if _, err := buf.WriteString(LineBreak); err != nil {
- return nil, err
- }
- }
- }
-
- return buf, nil
-}
-
-// WriteToIndent writes content into io.Writer with given indention.
-// If PrettyFormat has been set to be true,
-// it will align "=" sign with spaces under each section.
-func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
- buf, err := f.writeToBuffer(indent)
- if err != nil {
- return 0, err
- }
- return buf.WriteTo(w)
-}
-
-// WriteTo writes file content into io.Writer.
-func (f *File) WriteTo(w io.Writer) (int64, error) {
- return f.WriteToIndent(w, "")
-}
-
-// SaveToIndent writes content to file system with given value indention.
-func (f *File) SaveToIndent(filename, indent string) error {
- // Note: Because os.Create truncates any existing file,
- // it is safer to save to a temporary file location and rename when done.
- buf, err := f.writeToBuffer(indent)
- if err != nil {
- return err
- }
-
- return ioutil.WriteFile(filename, buf.Bytes(), 0666)
-}
-
-// SaveTo writes content to file system.
-func (f *File) SaveTo(filename string) error {
- return f.SaveToIndent(filename, "")
-}
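
To make the write-out path above concrete, here is a hedged sketch that builds a file in memory and renders it through `WriteTo`. It assumes `Section.NewKey(name, val string) (*Key, error)` from the library's section.go, which this diff does not include:

```go
package main

import (
	"os"

	"gopkg.in/ini.v1"
)

func main() {
	cfg := ini.Empty()

	sec, err := cfg.NewSection("server")
	if err != nil {
		panic(err)
	}
	sec.Comment = "HTTP front-end settings"
	// Section.NewKey lives in section.go, not shown in this diff.
	if _, err := sec.NewKey("host", "0.0.0.0"); err != nil {
		panic(err)
	}
	if _, err := sec.NewKey("port", "8080"); err != nil {
		panic(err)
	}

	// writeToBuffer renders the comment, the [server] header, and aligned "="
	// signs (PrettyFormat is on by default); WriteTo is the zero-indent path
	// that SaveTo also uses under the hood.
	if _, err := cfg.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}
```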
diff --git a/tools/vendor/gopkg.in/ini.v1/helper.go b/tools/vendor/gopkg.in/ini.v1/helper.go
deleted file mode 100644
index f9d80a682..000000000
--- a/tools/vendor/gopkg.in/ini.v1/helper.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2019 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-func inSlice(str string, s []string) bool {
- for _, v := range s {
- if str == v {
- return true
- }
- }
- return false
-}
diff --git a/tools/vendor/gopkg.in/ini.v1/ini.go b/tools/vendor/gopkg.in/ini.v1/ini.go
deleted file mode 100644
index 99e7f8651..000000000
--- a/tools/vendor/gopkg.in/ini.v1/ini.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-// Package ini provides INI file read and write functionality in Go.
-package ini
-
-import (
- "os"
- "regexp"
- "runtime"
- "strings"
-)
-
-const (
- // Maximum allowed depth when recursively substituting variable names.
- depthValues = 99
-)
-
-var (
- // DefaultSection is the name of default section. You can use this var or the string literal.
- // In most cases, an empty string is all you need to access the section.
- DefaultSection = "DEFAULT"
-
- // LineBreak is the delimiter to determine or compose a new line.
- // This variable will be changed to "\r\n" automatically on Windows at package init time.
- LineBreak = "\n"
-
- // Variable regexp pattern: %(variable)s
- varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
-
- // DefaultHeader explicitly writes default section header.
- DefaultHeader = false
-
- // PrettySection indicates whether to put a line between sections.
- PrettySection = true
- // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
- // or reduce all possible spaces for compact format.
- PrettyFormat = true
- // PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
- PrettyEqual = false
- // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
- DefaultFormatLeft = ""
- // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
- DefaultFormatRight = ""
-)
-
-var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
-
-func init() {
- if runtime.GOOS == "windows" && !inTest {
- LineBreak = "\r\n"
- }
-}
-
-// LoadOptions contains all customized options used for load data source(s).
-type LoadOptions struct {
- // Loose indicates whether the parser should ignore nonexistent files or return error.
- Loose bool
- // Insensitive indicates whether the parser forces all section and key names to lowercase.
- Insensitive bool
- // InsensitiveSections indicates whether the parser forces all section to lowercase.
- InsensitiveSections bool
- // InsensitiveKeys indicates whether the parser forces all key names to lowercase.
- InsensitiveKeys bool
- // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
- IgnoreContinuation bool
- // IgnoreInlineComment indicates whether to ignore comments at the end of a value and treat them as part of the value.
- IgnoreInlineComment bool
- // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
- SkipUnrecognizableLines bool
- // ShortCircuit indicates whether to ignore other configuration sources after the first available configuration source has been loaded.
- ShortCircuit bool
- // AllowBooleanKeys indicates whether to allow boolean type keys or to treat such keys as having a missing value.
- // Keys of this type are mostly used in my.cnf.
- AllowBooleanKeys bool
- // AllowShadows indicates whether to keep track of keys with same name under same section.
- AllowShadows bool
- // AllowNestedValues indicates whether to allow AWS-like nested values.
- // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
- AllowNestedValues bool
- // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
- // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
- // Relevant quote: Values can also span multiple lines, as long as they are indented deeper
- // than the first line of the value.
- AllowPythonMultilineValues bool
- // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
- // Docs: https://docs.python.org/2/library/configparser.html
- // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
- // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
- SpaceBeforeInlineComment bool
- // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
- // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
- UnescapeValueDoubleQuotes bool
- // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
- // when value is NOT surrounded by any quotes.
- // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all.
- UnescapeValueCommentSymbols bool
- // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
- // conform to key/value pairs. Specify the names of those blocks here.
- UnparseableSections []string
- // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
- KeyValueDelimiters string
- // KeyValueDelimiterOnWrite is the delimiter that is used to separate key and value on write. By default, it is "=".
- KeyValueDelimiterOnWrite string
- // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
- ChildSectionDelimiter string
- // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
- PreserveSurroundedQuote bool
- // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
- DebugFunc DebugFunc
- // ReaderBufferSize is the buffer size of the reader in bytes.
- ReaderBufferSize int
- // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
- AllowNonUniqueSections bool
- // AllowDuplicateShadowValues indicates whether to keep duplicate values for shadowed keys; when false, shadow values with the same content are deduplicated.
- AllowDuplicateShadowValues bool
-}
-
-// DebugFunc is the type of function called to log parse events.
-type DebugFunc func(message string)
-
-// LoadSources allows caller to apply customized options for loading from data source(s).
-func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
- sources := make([]dataSource, len(others)+1)
- sources[0], err = parseDataSource(source)
- if err != nil {
- return nil, err
- }
- for i := range others {
- sources[i+1], err = parseDataSource(others[i])
- if err != nil {
- return nil, err
- }
- }
- f := newFile(sources, opts)
- if err = f.Reload(); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// Load loads and parses from INI data sources.
-// Arguments can be a mix of file names (string) and raw data ([]byte).
-// It will return error if list contains nonexistent files.
-func Load(source interface{}, others ...interface{}) (*File, error) {
- return LoadSources(LoadOptions{}, source, others...)
-}
-
-// LooseLoad has exactly the same functionality as the Load function,
-// except it ignores nonexistent files instead of returning an error.
-func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
- return LoadSources(LoadOptions{Loose: true}, source, others...)
-}
-
-// InsensitiveLoad has exactly the same functionality as the Load function,
-// except it forces all section and key names to be lowercased.
-func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
- return LoadSources(LoadOptions{Insensitive: true}, source, others...)
-}
-
-// ShadowLoad has exactly the same functionality as the Load function,
-// except it allows shadow keys.
-func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
- return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
-}
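
As a usage sketch for the shadow-key machinery that `ShadowLoad` enables (and that key.go below implements), here is the classic git-config style example; `Section.Key` comes from section.go, which is not part of this diff:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// ShadowLoad keeps every occurrence of a repeated key instead of letting
	// the last one win, per the AllowShadows option above.
	cfg, err := ini.ShadowLoad([]byte(`[remote "origin"]
fetch = +refs/heads/*:refs/remotes/origin/*
fetch = +refs/pull/*:refs/remotes/origin/pull/*
`))
	if err != nil {
		panic(err)
	}
	// ValueWithShadows (defined in key.go below) returns the first value plus
	// all non-empty shadow values.
	for _, v := range cfg.Section(`remote "origin"`).Key("fetch").ValueWithShadows() {
		fmt.Println(v)
	}
}
```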
diff --git a/tools/vendor/gopkg.in/ini.v1/key.go b/tools/vendor/gopkg.in/ini.v1/key.go
deleted file mode 100644
index a19d9f38e..000000000
--- a/tools/vendor/gopkg.in/ini.v1/key.go
+++ /dev/null
@@ -1,837 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strconv"
- "strings"
- "time"
-)
-
-// Key represents a key under a section.
-type Key struct {
- s *Section
- Comment string
- name string
- value string
- isAutoIncrement bool
- isBooleanType bool
-
- isShadow bool
- shadows []*Key
-
- nestedValues []string
-}
-
-// newKey simply returns a key object with given values.
-func newKey(s *Section, name, val string) *Key {
- return &Key{
- s: s,
- name: name,
- value: val,
- }
-}
-
-func (k *Key) addShadow(val string) error {
- if k.isShadow {
- return errors.New("cannot add shadow to another shadow key")
- } else if k.isAutoIncrement || k.isBooleanType {
- return errors.New("cannot add shadow to auto-increment or boolean key")
- }
-
- if !k.s.f.options.AllowDuplicateShadowValues {
- // Deduplicate shadows based on their values.
- if k.value == val {
- return nil
- }
- for i := range k.shadows {
- if k.shadows[i].value == val {
- return nil
- }
- }
- }
-
- shadow := newKey(k.s, k.name, val)
- shadow.isShadow = true
- k.shadows = append(k.shadows, shadow)
- return nil
-}
-
-// AddShadow adds a new shadow key to itself.
-func (k *Key) AddShadow(val string) error {
- if !k.s.f.options.AllowShadows {
- return errors.New("shadow key is not allowed")
- }
- return k.addShadow(val)
-}
-
-func (k *Key) addNestedValue(val string) error {
- if k.isAutoIncrement || k.isBooleanType {
- return errors.New("cannot add nested value to auto-increment or boolean key")
- }
-
- k.nestedValues = append(k.nestedValues, val)
- return nil
-}
-
-// AddNestedValue adds a nested value to the key.
-func (k *Key) AddNestedValue(val string) error {
- if !k.s.f.options.AllowNestedValues {
- return errors.New("nested value is not allowed")
- }
- return k.addNestedValue(val)
-}
-
-// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
-type ValueMapper func(string) string
-
-// Name returns name of key.
-func (k *Key) Name() string {
- return k.name
-}
-
-// Value returns the raw value of the key for performance purposes.
-func (k *Key) Value() string {
- return k.value
-}
-
-// ValueWithShadows returns the raw values of the key and its shadows, if any.
-// Shadow keys with empty values are omitted from the returned list.
-func (k *Key) ValueWithShadows() []string {
- if len(k.shadows) == 0 {
- if k.value == "" {
- return []string{}
- }
- return []string{k.value}
- }
-
- vals := make([]string, 0, len(k.shadows)+1)
- if k.value != "" {
- vals = append(vals, k.value)
- }
- for _, s := range k.shadows {
- if s.value != "" {
- vals = append(vals, s.value)
- }
- }
- return vals
-}
-
-// NestedValues returns nested values stored in the key.
-// The returned value may be nil if no nested values are stored in the key.
-func (k *Key) NestedValues() []string {
- return k.nestedValues
-}
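Nested values are opt-in; a sketch of how they round-trip, assuming LoadOptions{AllowNestedValues: true} (indented lines under a key with an empty value become its nested values, per the parser logic later in this diff):

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	src := []byte("[package]\ndependencies =\n    libfoo\n    libbar\n")
	cfg, _ := ini.LoadSources(ini.LoadOptions{AllowNestedValues: true}, src)
	fmt.Println(cfg.Section("package").Key("dependencies").NestedValues())
	// [libfoo libbar]
}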
-
-// transformValue takes a raw value and transforms to its final string.
-func (k *Key) transformValue(val string) string {
- if k.s.f.ValueMapper != nil {
- val = k.s.f.ValueMapper(val)
- }
-
- // Fail fast if no indicator char is found for a recursive value
- if !strings.Contains(val, "%") {
- return val
- }
- for i := 0; i < depthValues; i++ {
- vr := varPattern.FindString(val)
- if len(vr) == 0 {
- break
- }
-
- // Take off leading '%(' and trailing ')s'.
- noption := vr[2 : len(vr)-2]
-
- // Search in the same section.
- // If not found or found the key itself, then search again in default section.
- nk, err := k.s.GetKey(noption)
- if err != nil || k == nk {
- nk, _ = k.s.f.Section("").GetKey(noption)
- if nk == nil {
- // Stop when no result is found in the default section,
- // and return the value as-is.
- break
- }
- }
-
- // Substitute the variable reference with the resolved value.
- val = strings.Replace(val, vr, nk.value, -1)
- }
- return val
-}
-
-// String returns string representation of value.
-func (k *Key) String() string {
- return k.transformValue(k.value)
-}
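String resolves recursive `%(name)s` references via transformValue above, looking in the same section first and the default section second; a sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	src := []byte("NAME = ini\nVERSION = v1\nIMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s\n")
	cfg, _ := ini.Load(src)
	fmt.Println(cfg.Section("").Key("IMPORT_PATH").String())
	// gopkg.in/ini.v1
}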
-
-// Validate accepts a validate function which can
-// return a modified result as the key value.
-func (k *Key) Validate(fn func(string) string) string {
- return fn(k.String())
-}
-
-// parseBool returns the boolean value represented by the string.
-//
-// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
-// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
-// Any other value returns an error.
-func parseBool(str string) (value bool, err error) {
- switch str {
- case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
- return true, nil
- case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
- return false, nil
- }
- return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
-}
-
-// Bool returns bool type value.
-func (k *Key) Bool() (bool, error) {
- return parseBool(k.String())
-}
-
-// Float64 returns float64 type value.
-func (k *Key) Float64() (float64, error) {
- return strconv.ParseFloat(k.String(), 64)
-}
-
-// Int returns int type value.
-func (k *Key) Int() (int, error) {
- v, err := strconv.ParseInt(k.String(), 0, 64)
- return int(v), err
-}
-
-// Int64 returns int64 type value.
-func (k *Key) Int64() (int64, error) {
- return strconv.ParseInt(k.String(), 0, 64)
-}
-
-// Uint returns uint type value.
-func (k *Key) Uint() (uint, error) {
- u, e := strconv.ParseUint(k.String(), 0, 64)
- return uint(u), e
-}
-
-// Uint64 returns uint64 type value.
-func (k *Key) Uint64() (uint64, error) {
- return strconv.ParseUint(k.String(), 0, 64)
-}
-
-// Duration returns time.Duration type value.
-func (k *Key) Duration() (time.Duration, error) {
- return time.ParseDuration(k.String())
-}
-
-// TimeFormat parses with given format and returns time.Time type value.
-func (k *Key) TimeFormat(format string) (time.Time, error) {
- return time.Parse(format, k.String())
-}
-
-// Time parses with RFC3339 format and returns time.Time type value.
-func (k *Key) Time() (time.Time, error) {
- return k.TimeFormat(time.RFC3339)
-}
-
-// MustString returns default value if key value is empty.
-func (k *Key) MustString(defaultVal string) string {
- val := k.String()
- if len(val) == 0 {
- k.value = defaultVal
- return defaultVal
- }
- return val
-}
-
-// MustBool always returns a value without error;
-// it returns false if an error occurs.
-func (k *Key) MustBool(defaultVal ...bool) bool {
- val, err := k.Bool()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatBool(defaultVal[0])
- return defaultVal[0]
- }
- return val
-}
-
-// MustFloat64 always returns a value without error;
-// it returns 0.0 if an error occurs.
-func (k *Key) MustFloat64(defaultVal ...float64) float64 {
- val, err := k.Float64()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
- return defaultVal[0]
- }
- return val
-}
-
-// MustInt always returns a value without error;
-// it returns 0 if an error occurs.
-func (k *Key) MustInt(defaultVal ...int) int {
- val, err := k.Int()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
- return defaultVal[0]
- }
- return val
-}
-
-// MustInt64 always returns a value without error;
-// it returns 0 if an error occurs.
-func (k *Key) MustInt64(defaultVal ...int64) int64 {
- val, err := k.Int64()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatInt(defaultVal[0], 10)
- return defaultVal[0]
- }
- return val
-}
-
-// MustUint always returns a value without error;
-// it returns 0 if an error occurs.
-func (k *Key) MustUint(defaultVal ...uint) uint {
- val, err := k.Uint()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
- return defaultVal[0]
- }
- return val
-}
-
-// MustUint64 always returns a value without error;
-// it returns 0 if an error occurs.
-func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
- val, err := k.Uint64()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatUint(defaultVal[0], 10)
- return defaultVal[0]
- }
- return val
-}
-
-// MustDuration always returns a value without error;
-// it returns the zero value if an error occurs.
-func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
- val, err := k.Duration()
- if len(defaultVal) > 0 && err != nil {
- k.value = defaultVal[0].String()
- return defaultVal[0]
- }
- return val
-}
-
-// MustTimeFormat always parses with the given format and returns a value without error;
-// it returns the zero value if an error occurs.
-func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
- val, err := k.TimeFormat(format)
- if len(defaultVal) > 0 && err != nil {
- k.value = defaultVal[0].Format(format)
- return defaultVal[0]
- }
- return val
-}
-
-// MustTime always parses with the RFC3339 format and returns a value without error;
-// it returns the zero value if an error occurs.
-func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
- return k.MustTimeFormat(time.RFC3339, defaultVal...)
-}
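Note that every Must* helper writes the default back into the key on a parse failure (the k.value assignments above), so later reads see the default; a sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	cfg, _ := ini.Load([]byte("[server]\nport = not-a-number\n"))
	key := cfg.Section("server").Key("port")
	fmt.Println(key.MustInt(8080)) // 8080: parse failed, default used
	fmt.Println(key.Int())         // 8080 <nil>: the default was cached back
}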
-
-// In always returns a value without error;
-// it returns the default value if an error occurs or the value is not among the candidates.
-func (k *Key) In(defaultVal string, candidates []string) string {
- val := k.String()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InFloat64 always returns a value without error;
-// it returns the default value if an error occurs or the value is not among the candidates.
-func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
- val := k.MustFloat64()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InInt always returns a value without error;
-// it returns the default value if an error occurs or the value is not among the candidates.
-func (k *Key) InInt(defaultVal int, candidates []int) int {
- val := k.MustInt()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InInt64 always returns a value without error;
-// it returns the default value if an error occurs or the value is not among the candidates.
-func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
- val := k.MustInt64()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InUint always returns a value without error;
-// it returns the default value if an error occurs or the value is not among the candidates.
-func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
- val := k.MustUint()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InUint64 always returns a value without error;
-// it returns the default value if an error occurs or the value is not among the candidates.
-func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
- val := k.MustUint64()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InTimeFormat always parses with the given format and returns a value without error;
-// it returns the default value if an error occurs or the value is not among the candidates.
-func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
- val := k.MustTimeFormat(format)
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InTime always parses with the RFC3339 format and returns a value without error;
-// it returns the default value if an error occurs or the value is not among the candidates.
-func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
- return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
-}
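The In* family validates against a whitelist of candidates rather than parsing; a sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	cfg, _ := ini.Load([]byte("level = warn\n"))
	fmt.Println(cfg.Section("").Key("level").In("info", []string{"debug", "info", "error"}))
	// info: "warn" is not among the candidates, so the default wins
}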
-
-// RangeFloat64 checks if value is in given range inclusively,
-// and returns default value if it's not.
-func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
- val := k.MustFloat64()
- if val < min || val > max {
- return defaultVal
- }
- return val
-}
-
-// RangeInt checks if value is in given range inclusively,
-// and returns default value if it's not.
-func (k *Key) RangeInt(defaultVal, min, max int) int {
- val := k.MustInt()
- if val < min || val > max {
- return defaultVal
- }
- return val
-}
-
-// RangeInt64 checks if value is in given range inclusively,
-// and returns default value if it's not.
-func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
- val := k.MustInt64()
- if val < min || val > max {
- return defaultVal
- }
- return val
-}
-
-// RangeTimeFormat checks if value with given format is in given range inclusively,
-// and returns default value if it's not.
-func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
- val := k.MustTimeFormat(format)
- if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
- return defaultVal
- }
- return val
-}
-
-// RangeTime checks if value with RFC3339 format is in given range inclusively,
-// and returns default value if it's not.
-func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
- return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
-}
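The Range* helpers clamp nothing; they simply fall back to the default when the parsed value lies outside [min, max]. A sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	cfg, _ := ini.Load([]byte("workers = 500\n"))
	fmt.Println(cfg.Section("").Key("workers").RangeInt(16, 1, 128))
	// 16: 500 is outside [1, 128], so the default is returned
}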
-
-// Strings returns a list of strings divided by the given delimiter.
-func (k *Key) Strings(delim string) []string {
- str := k.String()
- if len(str) == 0 {
- return []string{}
- }
-
- runes := []rune(str)
- vals := make([]string, 0, 2)
- var buf bytes.Buffer
- escape := false
- idx := 0
- for {
- if escape {
- escape = false
- if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
- buf.WriteRune('\\')
- }
- buf.WriteRune(runes[idx])
- } else {
- if runes[idx] == '\\' {
- escape = true
- } else if strings.HasPrefix(string(runes[idx:]), delim) {
- idx += len(delim) - 1
- vals = append(vals, strings.TrimSpace(buf.String()))
- buf.Reset()
- } else {
- buf.WriteRune(runes[idx])
- }
- }
- idx++
- if idx == len(runes) {
- break
- }
- }
-
- if buf.Len() > 0 {
- vals = append(vals, strings.TrimSpace(buf.String()))
- }
-
- return vals
-}
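Strings treats a backslash before the delimiter as an escape, so a literal delimiter can appear inside an element; a sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	cfg, _ := ini.Load([]byte("tags = go, ini, a\\,b\n"))
	fmt.Println(cfg.Section("").Key("tags").Strings(","))
	// [go ini a,b]
}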
-
-// StringsWithShadows returns a list of strings divided by the given delimiter.
-// Shadow values are also included if any exist.
-func (k *Key) StringsWithShadows(delim string) []string {
- vals := k.ValueWithShadows()
- results := make([]string, 0, len(vals)*2)
- for i := range vals {
- if len(vals[i]) == 0 {
- continue
- }
-
- results = append(results, strings.Split(vals[i], delim)...)
- }
-
- for i := range results {
- results[i] = k.transformValue(strings.TrimSpace(results[i]))
- }
- return results
-}
-
-// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Float64s(delim string) []float64 {
- vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
- return vals
-}
-
-// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Ints(delim string) []int {
- vals, _ := k.parseInts(k.Strings(delim), true, false)
- return vals
-}
-
-// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Int64s(delim string) []int64 {
- vals, _ := k.parseInt64s(k.Strings(delim), true, false)
- return vals
-}
-
-// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Uints(delim string) []uint {
- vals, _ := k.parseUints(k.Strings(delim), true, false)
- return vals
-}
-
-// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Uint64s(delim string) []uint64 {
- vals, _ := k.parseUint64s(k.Strings(delim), true, false)
- return vals
-}
-
-// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Bools(delim string) []bool {
- vals, _ := k.parseBools(k.Strings(delim), true, false)
- return vals
-}
-
-// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
-// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
-func (k *Key) TimesFormat(format, delim string) []time.Time {
- vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
- return vals
-}
-
-// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
-// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
-func (k *Key) Times(delim string) []time.Time {
- return k.TimesFormat(time.RFC3339, delim)
-}
-
-// ValidFloat64s returns a list of float64 divided by the given delimiter. If some value is not a float, then
-// it will not be included in the result list.
-func (k *Key) ValidFloat64s(delim string) []float64 {
- vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidInts returns a list of int divided by the given delimiter. If some value is not an integer, then it will
-// not be included in the result list.
-func (k *Key) ValidInts(delim string) []int {
- vals, _ := k.parseInts(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidInt64s returns a list of int64 divided by the given delimiter. If some value is not a 64-bit integer,
-// then it will not be included in the result list.
-func (k *Key) ValidInt64s(delim string) []int64 {
- vals, _ := k.parseInt64s(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidUints returns a list of uint divided by the given delimiter. If some value is not an unsigned integer,
-// then it will not be included in the result list.
-func (k *Key) ValidUints(delim string) []uint {
- vals, _ := k.parseUints(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidUint64s returns a list of uint64 divided by the given delimiter. If some value is not a 64-bit unsigned
-// integer, then it will not be included in the result list.
-func (k *Key) ValidUint64s(delim string) []uint64 {
- vals, _ := k.parseUint64s(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidBools returns a list of bool divided by the given delimiter. If some value is not a valid boolean,
-// then it will not be included in the result list.
-func (k *Key) ValidBools(delim string) []bool {
- vals, _ := k.parseBools(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
-func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
- vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
- return vals
-}
-
-// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
-func (k *Key) ValidTimes(delim string) []time.Time {
- return k.ValidTimesFormat(time.RFC3339, delim)
-}
-
-// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
-func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
- return k.parseFloat64s(k.Strings(delim), false, true)
-}
-
-// StrictInts returns list of int divided by given delimiter or error on first invalid input.
-func (k *Key) StrictInts(delim string) ([]int, error) {
- return k.parseInts(k.Strings(delim), false, true)
-}
-
-// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
-func (k *Key) StrictInt64s(delim string) ([]int64, error) {
- return k.parseInt64s(k.Strings(delim), false, true)
-}
-
-// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
-func (k *Key) StrictUints(delim string) ([]uint, error) {
- return k.parseUints(k.Strings(delim), false, true)
-}
-
-// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
-func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
- return k.parseUint64s(k.Strings(delim), false, true)
-}
-
-// StrictBools returns list of bool divided by given delimiter or error on first invalid input.
-func (k *Key) StrictBools(delim string) ([]bool, error) {
- return k.parseBools(k.Strings(delim), false, true)
-}
-
-// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
-// or error on first invalid input.
-func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
- return k.parseTimesFormat(format, k.Strings(delim), false, true)
-}
-
-// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
-// or error on first invalid input.
-func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
- return k.StrictTimesFormat(time.RFC3339, delim)
-}
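The three list families differ only in the (addInvalid, returnOnInvalid) flags they pass to doParse below: the plain variants zero-fill invalid entries, the Valid* variants drop them, and the Strict* variants fail fast. A sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	cfg, _ := ini.Load([]byte("nums = 1, x, 3\n"))
	key := cfg.Section("").Key("nums")
	fmt.Println(key.Ints(","))      // [1 0 3]: invalid entry becomes the zero value
	fmt.Println(key.ValidInts(",")) // [1 3]:   invalid entry dropped
	_, err := key.StrictInts(",")
	fmt.Println(err != nil)         // true:    fails on the first invalid entry
}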
-
-// parseBools transforms strings to bools.
-func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
- vals := make([]bool, 0, len(strs))
- parser := func(str string) (interface{}, error) {
- val, err := parseBool(str)
- return val, err
- }
- rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
- if err == nil {
- for _, val := range rawVals {
- vals = append(vals, val.(bool))
- }
- }
- return vals, err
-}
-
-// parseFloat64s transforms strings to float64s.
-func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
- vals := make([]float64, 0, len(strs))
- parser := func(str string) (interface{}, error) {
- val, err := strconv.ParseFloat(str, 64)
- return val, err
- }
- rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
- if err == nil {
- for _, val := range rawVals {
- vals = append(vals, val.(float64))
- }
- }
- return vals, err
-}
-
-// parseInts transforms strings to ints.
-func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
- vals := make([]int, 0, len(strs))
- parser := func(str string) (interface{}, error) {
- val, err := strconv.ParseInt(str, 0, 64)
- return val, err
- }
- rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
- if err == nil {
- for _, val := range rawVals {
- vals = append(vals, int(val.(int64)))
- }
- }
- return vals, err
-}
-
-// parseInt64s transforms strings to int64s.
-func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
- vals := make([]int64, 0, len(strs))
- parser := func(str string) (interface{}, error) {
- val, err := strconv.ParseInt(str, 0, 64)
- return val, err
- }
-
- rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
- if err == nil {
- for _, val := range rawVals {
- vals = append(vals, val.(int64))
- }
- }
- return vals, err
-}
-
-// parseUints transforms strings to uints.
-func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
- vals := make([]uint, 0, len(strs))
- parser := func(str string) (interface{}, error) {
- val, err := strconv.ParseUint(str, 0, 64)
- return val, err
- }
-
- rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
- if err == nil {
- for _, val := range rawVals {
- vals = append(vals, uint(val.(uint64)))
- }
- }
- return vals, err
-}
-
-// parseUint64s transforms strings to uint64s.
-func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
- vals := make([]uint64, 0, len(strs))
- parser := func(str string) (interface{}, error) {
- val, err := strconv.ParseUint(str, 0, 64)
- return val, err
- }
- rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
- if err == nil {
- for _, val := range rawVals {
- vals = append(vals, val.(uint64))
- }
- }
- return vals, err
-}
-
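-// Parser is a function that converts a raw string into a typed value.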
-type Parser func(str string) (interface{}, error)
-
-// parseTimesFormat transforms strings to times in given format.
-func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
- vals := make([]time.Time, 0, len(strs))
- parser := func(str string) (interface{}, error) {
- val, err := time.Parse(format, str)
- return val, err
- }
- rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
- if err == nil {
- for _, val := range rawVals {
- vals = append(vals, val.(time.Time))
- }
- }
- return vals, err
-}
-
-// doParse transforms strings to different types
-func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
- vals := make([]interface{}, 0, len(strs))
- for _, str := range strs {
- val, err := parser(str)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
- }
- }
- return vals, nil
-}
-
-// SetValue changes key value.
-func (k *Key) SetValue(v string) {
- if k.s.f.BlockMode {
- k.s.f.lock.Lock()
- defer k.s.f.lock.Unlock()
- }
-
- k.value = v
- k.s.keysHash[k.name] = v
-}
diff --git a/tools/vendor/gopkg.in/ini.v1/parser.go b/tools/vendor/gopkg.in/ini.v1/parser.go
deleted file mode 100644
index 44fc526c2..000000000
--- a/tools/vendor/gopkg.in/ini.v1/parser.go
+++ /dev/null
@@ -1,520 +0,0 @@
-// Copyright 2015 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "regexp"
- "strconv"
- "strings"
- "unicode"
-)
-
-const minReaderBufferSize = 4096
-
-var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`)
-
-type parserOptions struct {
- IgnoreContinuation bool
- IgnoreInlineComment bool
- AllowPythonMultilineValues bool
- SpaceBeforeInlineComment bool
- UnescapeValueDoubleQuotes bool
- UnescapeValueCommentSymbols bool
- PreserveSurroundedQuote bool
- DebugFunc DebugFunc
- ReaderBufferSize int
-}
-
-type parser struct {
- buf *bufio.Reader
- options parserOptions
-
- isEOF bool
- count int
- comment *bytes.Buffer
-}
-
-func (p *parser) debug(format string, args ...interface{}) {
- if p.options.DebugFunc != nil {
- p.options.DebugFunc(fmt.Sprintf(format, args...))
- }
-}
-
-func newParser(r io.Reader, opts parserOptions) *parser {
- size := opts.ReaderBufferSize
- if size < minReaderBufferSize {
- size = minReaderBufferSize
- }
-
- return &parser{
- buf: bufio.NewReaderSize(r, size),
- options: opts,
- count: 1,
- comment: &bytes.Buffer{},
- }
-}
-
-// BOM handles the byte order mark header of UTF-8, UTF-16 LE and UTF-16 BE encodings.
-// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
-func (p *parser) BOM() error {
- mask, err := p.buf.Peek(2)
- if err != nil && err != io.EOF {
- return err
- } else if len(mask) < 2 {
- return nil
- }
-
- switch {
- case mask[0] == 254 && mask[1] == 255:
- fallthrough
- case mask[0] == 255 && mask[1] == 254:
- _, err = p.buf.Read(mask)
- if err != nil {
- return err
- }
- case mask[0] == 239 && mask[1] == 187:
- mask, err := p.buf.Peek(3)
- if err != nil && err != io.EOF {
- return err
- } else if len(mask) < 3 {
- return nil
- }
- if mask[2] == 191 {
- _, err = p.buf.Read(mask)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (p *parser) readUntil(delim byte) ([]byte, error) {
- data, err := p.buf.ReadBytes(delim)
- if err != nil {
- if err == io.EOF {
- p.isEOF = true
- } else {
- return nil, err
- }
- }
- return data, nil
-}
-
-func cleanComment(in []byte) ([]byte, bool) {
- i := bytes.IndexAny(in, "#;")
- if i == -1 {
- return nil, false
- }
- return in[i:], true
-}
-
-func readKeyName(delimiters string, in []byte) (string, int, error) {
- line := string(in)
-
- // Check if key name surrounded by quotes.
- var keyQuote string
- if line[0] == '"' {
- if len(line) > 6 && line[0:3] == `"""` {
- keyQuote = `"""`
- } else {
- keyQuote = `"`
- }
- } else if line[0] == '`' {
- keyQuote = "`"
- }
-
- // Extract the key name
- var endIdx int
- if len(keyQuote) > 0 {
- startIdx := len(keyQuote)
- // FIXME: fail case -> """"""name"""=value
- pos := strings.Index(line[startIdx:], keyQuote)
- if pos == -1 {
- return "", -1, fmt.Errorf("missing closing key quote: %s", line)
- }
- pos += startIdx
-
- // Find key-value delimiter
- i := strings.IndexAny(line[pos+startIdx:], delimiters)
- if i < 0 {
- return "", -1, ErrDelimiterNotFound{line}
- }
- endIdx = pos + i
- return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
- }
-
- endIdx = strings.IndexAny(line, delimiters)
- if endIdx < 0 {
- return "", -1, ErrDelimiterNotFound{line}
- }
- if endIdx == 0 {
- return "", -1, ErrEmptyKeyName{line}
- }
-
- return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
-}
-
-func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
- for {
- data, err := p.readUntil('\n')
- if err != nil {
- return "", err
- }
- next := string(data)
-
- pos := strings.LastIndex(next, valQuote)
- if pos > -1 {
- val += next[:pos]
-
- comment, has := cleanComment([]byte(next[pos:]))
- if has {
- p.comment.Write(bytes.TrimSpace(comment))
- }
- break
- }
- val += next
- if p.isEOF {
- return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
- }
- }
- return val, nil
-}
-
-func (p *parser) readContinuationLines(val string) (string, error) {
- for {
- data, err := p.readUntil('\n')
- if err != nil {
- return "", err
- }
- next := strings.TrimSpace(string(data))
-
- if len(next) == 0 {
- break
- }
- val += next
- if val[len(val)-1] != '\\' {
- break
- }
- val = val[:len(val)-1]
- }
- return val, nil
-}
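readContinuationLines implements classic backslash continuations: each physical line ending in '\' is joined with the next. A sketch under default LoadOptions (where IgnoreContinuation is false):

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	src := []byte("args = --verbose \\\n    --color \\\n    --json\n")
	cfg, _ := ini.Load(src)
	fmt.Println(cfg.Section("").Key("args").String())
	// --verbose --color --json
}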
-
-// hasSurroundedQuote checks whether the first and last characters
-// are quotes \" or \', and returns false if any other part
-// contains the same kind of quote.
-func hasSurroundedQuote(in string, quote byte) bool {
- return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
- strings.IndexByte(in[1:], quote) == len(in)-2
-}
-
-func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
-
- line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
- if len(line) == 0 {
- if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
- return p.readPythonMultilines(line, bufferSize)
- }
- return "", nil
- }
-
- var valQuote string
- if len(line) > 3 && line[0:3] == `"""` {
- valQuote = `"""`
- } else if line[0] == '`' {
- valQuote = "`"
- } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
- valQuote = `"`
- }
-
- if len(valQuote) > 0 {
- startIdx := len(valQuote)
- pos := strings.LastIndex(line[startIdx:], valQuote)
- // Check for multi-line value
- if pos == -1 {
- return p.readMultilines(line, line[startIdx:], valQuote)
- }
-
- if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
- return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
- }
- return line[startIdx : pos+startIdx], nil
- }
-
- lastChar := line[len(line)-1]
- // Won't be able to reach here if value only contains whitespace
- line = strings.TrimSpace(line)
- trimmedLastChar := line[len(line)-1]
-
- // Check continuation lines when desired
- if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
- return p.readContinuationLines(line[:len(line)-1])
- }
-
- // Check if ignore inline comment
- if !p.options.IgnoreInlineComment {
- var i int
- if p.options.SpaceBeforeInlineComment {
- i = strings.Index(line, " #")
- if i == -1 {
- i = strings.Index(line, " ;")
- }
-
- } else {
- i = strings.IndexAny(line, "#;")
- }
-
- if i > -1 {
- p.comment.WriteString(line[i:])
- line = strings.TrimSpace(line[:i])
- }
-
- }
-
- // Trim single and double quotes
- if (hasSurroundedQuote(line, '\'') ||
- hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
- line = line[1 : len(line)-1]
- } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
- line = strings.ReplaceAll(line, `\;`, ";")
- line = strings.ReplaceAll(line, `\#`, "#")
- } else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
- return p.readPythonMultilines(line, bufferSize)
- }
-
- return line, nil
-}
-
-func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
- parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
- peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
-
- for {
- peekData, peekErr := peekBuffer.ReadBytes('\n')
- if peekErr != nil && peekErr != io.EOF {
- p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
- return "", peekErr
- }
-
- p.debug("readPythonMultilines: parsing %q", string(peekData))
-
- peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
- p.debug("readPythonMultilines: matched %d parts", len(peekMatches))
- for n, v := range peekMatches {
- p.debug(" %d: %q", n, v)
- }
-
- // Return if not a Python multiline value.
- if len(peekMatches) != 3 {
- p.debug("readPythonMultilines: end of value, got: %q", line)
- return line, nil
- }
-
- // Advance the parser reader (buffer) in-sync with the peek buffer.
- _, err := p.buf.Discard(len(peekData))
- if err != nil {
- p.debug("readPythonMultilines: failed to skip to the end, returning error")
- return "", err
- }
-
- line += "\n" + peekMatches[0]
- }
-}
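readPythonMultilines recognizes continuation purely by leading indentation (the pythonMultiline regexp above) and preserves that indentation in the joined value; it is opt-in via AllowPythonMultilineValues. A sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	src := []byte("[text]\nbanner = first line\n  second line\n  third line\n")
	cfg, _ := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, src)
	fmt.Printf("%q\n", cfg.Section("text").Key("banner").String())
	// "first line\n  second line\n  third line"
}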
-
-// parse parses data through an io.Reader.
-func (f *File) parse(reader io.Reader) (err error) {
- p := newParser(reader, parserOptions{
- IgnoreContinuation: f.options.IgnoreContinuation,
- IgnoreInlineComment: f.options.IgnoreInlineComment,
- AllowPythonMultilineValues: f.options.AllowPythonMultilineValues,
- SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment,
- UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes,
- UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
- PreserveSurroundedQuote: f.options.PreserveSurroundedQuote,
- DebugFunc: f.options.DebugFunc,
- ReaderBufferSize: f.options.ReaderBufferSize,
- })
- if err = p.BOM(); err != nil {
- return fmt.Errorf("BOM: %v", err)
- }
-
- // Ignore error because default section name is never empty string.
- name := DefaultSection
- if f.options.Insensitive || f.options.InsensitiveSections {
- name = strings.ToLower(DefaultSection)
- }
- section, _ := f.NewSection(name)
-
- // This "last" is not strictly equivalent to "previous one" when the current key is not the first nested key.
- var isLastValueEmpty bool
- var lastRegularKey *Key
-
- var line []byte
- var inUnparseableSection bool
-
- // NOTE: Iterate and increase `currentPeekSize` until
- // the size of the parser buffer is found.
- // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
- parserBufferSize := 0
- // NOTE: Peek 4kb at a time.
- currentPeekSize := minReaderBufferSize
-
- if f.options.AllowPythonMultilineValues {
- for {
- peekBytes, _ := p.buf.Peek(currentPeekSize)
- peekBytesLength := len(peekBytes)
-
- if parserBufferSize >= peekBytesLength {
- break
- }
-
- currentPeekSize *= 2
- parserBufferSize = peekBytesLength
- }
- }
-
- for !p.isEOF {
- line, err = p.readUntil('\n')
- if err != nil {
- return err
- }
-
- if f.options.AllowNestedValues &&
- isLastValueEmpty && len(line) > 0 {
- if line[0] == ' ' || line[0] == '\t' {
- err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
- if err != nil {
- return err
- }
- continue
- }
- }
-
- line = bytes.TrimLeftFunc(line, unicode.IsSpace)
- if len(line) == 0 {
- continue
- }
-
- // Comments
- if line[0] == '#' || line[0] == ';' {
- // Note: we do not trim the ending line break,
- // as it is needed for appending a second line,
- // so just clean it once at the end when assigning the value.
- p.comment.Write(line)
- continue
- }
-
- // Section
- if line[0] == '[' {
- // Read to the next ']' (TODO: support quoted strings)
- closeIdx := bytes.LastIndexByte(line, ']')
- if closeIdx == -1 {
- return fmt.Errorf("unclosed section: %s", line)
- }
-
- name := string(line[1:closeIdx])
- section, err = f.NewSection(name)
- if err != nil {
- return err
- }
-
- comment, has := cleanComment(line[closeIdx+1:])
- if has {
- p.comment.Write(comment)
- }
-
- section.Comment = strings.TrimSpace(p.comment.String())
-
- // Reset auto-counter and comments
- p.comment.Reset()
- p.count = 1
- // Nested values can't span sections
- isLastValueEmpty = false
-
- inUnparseableSection = false
- for i := range f.options.UnparseableSections {
- if f.options.UnparseableSections[i] == name ||
- ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) {
- inUnparseableSection = true
- continue
- }
- }
- continue
- }
-
- if inUnparseableSection {
- section.isRawSection = true
- section.rawBody += string(line)
- continue
- }
-
- kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
- if err != nil {
- switch {
- // Treat as boolean key when desired, and whole line is key name.
- case IsErrDelimiterNotFound(err):
- switch {
- case f.options.AllowBooleanKeys:
- kname, err := p.readValue(line, parserBufferSize)
- if err != nil {
- return err
- }
- key, err := section.NewBooleanKey(kname)
- if err != nil {
- return err
- }
- key.Comment = strings.TrimSpace(p.comment.String())
- p.comment.Reset()
- continue
-
- case f.options.SkipUnrecognizableLines:
- continue
- }
- case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines:
- continue
- }
- return err
- }
-
- // Auto increment.
- isAutoIncr := false
- if kname == "-" {
- isAutoIncr = true
- kname = "#" + strconv.Itoa(p.count)
- p.count++
- }
-
- value, err := p.readValue(line[offset:], parserBufferSize)
- if err != nil {
- return err
- }
- isLastValueEmpty = len(value) == 0
-
- key, err := section.NewKey(kname, value)
- if err != nil {
- return err
- }
- key.isAutoIncrement = isAutoIncr
- key.Comment = strings.TrimSpace(p.comment.String())
- p.comment.Reset()
- lastRegularKey = key
- }
- return nil
-}
diff --git a/tools/vendor/gopkg.in/ini.v1/section.go b/tools/vendor/gopkg.in/ini.v1/section.go
deleted file mode 100644
index a3615d820..000000000
--- a/tools/vendor/gopkg.in/ini.v1/section.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-// Section represents a config section.
-type Section struct {
- f *File
- Comment string
- name string
- keys map[string]*Key
- keyList []string
- keysHash map[string]string
-
- isRawSection bool
- rawBody string
-}
-
-func newSection(f *File, name string) *Section {
- return &Section{
- f: f,
- name: name,
- keys: make(map[string]*Key),
- keyList: make([]string, 0, 10),
- keysHash: make(map[string]string),
- }
-}
-
-// Name returns name of Section.
-func (s *Section) Name() string {
- return s.name
-}
-
-// Body returns rawBody of Section if the section was marked as unparseable.
-// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
-func (s *Section) Body() string {
- return strings.TrimSpace(s.rawBody)
-}
-
-// SetBody updates body content only if section is raw.
-func (s *Section) SetBody(body string) {
- if !s.isRawSection {
- return
- }
- s.rawBody = body
-}
-
-// NewKey creates a new key in the given section.
-func (s *Section) NewKey(name, val string) (*Key, error) {
- if len(name) == 0 {
- return nil, errors.New("error creating new key: empty key name")
- } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
- name = strings.ToLower(name)
- }
-
- if s.f.BlockMode {
- s.f.lock.Lock()
- defer s.f.lock.Unlock()
- }
-
- if inSlice(name, s.keyList) {
- if s.f.options.AllowShadows {
- if err := s.keys[name].addShadow(val); err != nil {
- return nil, err
- }
- } else {
- s.keys[name].value = val
- s.keysHash[name] = val
- }
- return s.keys[name], nil
- }
-
- s.keyList = append(s.keyList, name)
- s.keys[name] = newKey(s, name, val)
- s.keysHash[name] = val
- return s.keys[name], nil
-}
-
-// NewBooleanKey creates a new boolean-type key in the given section.
-func (s *Section) NewBooleanKey(name string) (*Key, error) {
- key, err := s.NewKey(name, "true")
- if err != nil {
- return nil, err
- }
-
- key.isBooleanType = true
- return key, nil
-}
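Boolean keys are lines with no key-value delimiter at all; the parser (in parser.go above) only accepts them under LoadOptions{AllowBooleanKeys: true} and routes them through NewBooleanKey, so they read back as "true". A sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	src := []byte("[features]\nskip-ssl-verify\n")
	cfg, _ := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, src)
	fmt.Println(cfg.Section("features").Key("skip-ssl-verify").MustBool(false))
	// true
}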
-
-// GetKey returns key in section by given name.
-func (s *Section) GetKey(name string) (*Key, error) {
- if s.f.BlockMode {
- s.f.lock.RLock()
- }
- if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
- name = strings.ToLower(name)
- }
- key := s.keys[name]
- if s.f.BlockMode {
- s.f.lock.RUnlock()
- }
-
- if key == nil {
- // Check if it is a child-section.
- sname := s.name
- for {
- if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
- sname = sname[:i]
- sec, err := s.f.GetSection(sname)
- if err != nil {
- continue
- }
- return sec.GetKey(name)
- }
- break
- }
- return nil, fmt.Errorf("error when getting key of section %q: key %q does not exist", s.name, name)
- }
- return key, nil
-}
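The fallback loop above gives child sections key inheritance: a miss in "[server.mirror]" retries in "[server]", splitting on ChildSectionDelimiter ("." by default). A sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	src := []byte("[server]\nprotocol = http\n\n[server.mirror]\nhost = example.com\n")
	cfg, _ := ini.Load(src)
	fmt.Println(cfg.Section("server.mirror").Key("protocol").String())
	// http: inherited from [server]
}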
-
-// HasKey returns true if section contains a key with given name.
-func (s *Section) HasKey(name string) bool {
- key, _ := s.GetKey(name)
- return key != nil
-}
-
-// Deprecated: Use "HasKey" instead.
-func (s *Section) Haskey(name string) bool {
- return s.HasKey(name)
-}
-
-// HasValue returns true if section contains given raw value.
-func (s *Section) HasValue(value string) bool {
- if s.f.BlockMode {
- s.f.lock.RLock()
- defer s.f.lock.RUnlock()
- }
-
- for _, k := range s.keys {
- if value == k.value {
- return true
- }
- }
- return false
-}
-
-// Key assumes named Key exists in section and returns a zero-value when not.
-func (s *Section) Key(name string) *Key {
- key, err := s.GetKey(name)
- if err != nil {
- // It's OK here because the only possible error is empty key name,
- // but if it's empty, this piece of code won't be executed.
- key, _ = s.NewKey(name, "")
- return key
- }
- return key
-}
-
-// Keys returns list of keys of section.
-func (s *Section) Keys() []*Key {
- keys := make([]*Key, len(s.keyList))
- for i := range s.keyList {
- keys[i] = s.Key(s.keyList[i])
- }
- return keys
-}
-
-// ParentKeys returns list of keys of parent section.
-func (s *Section) ParentKeys() []*Key {
- var parentKeys []*Key
- sname := s.name
- for {
- if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
- sname = sname[:i]
- sec, err := s.f.GetSection(sname)
- if err != nil {
- continue
- }
- parentKeys = append(parentKeys, sec.Keys()...)
- } else {
- break
- }
-
- }
- return parentKeys
-}
-
-// KeyStrings returns list of key names of section.
-func (s *Section) KeyStrings() []string {
- list := make([]string, len(s.keyList))
- copy(list, s.keyList)
- return list
-}
-
-// KeysHash returns keys hash consisting of names and values.
-func (s *Section) KeysHash() map[string]string {
- if s.f.BlockMode {
- s.f.lock.RLock()
- defer s.f.lock.RUnlock()
- }
-
- hash := make(map[string]string, len(s.keysHash))
- for key, value := range s.keysHash {
- hash[key] = value
- }
- return hash
-}
-
-// DeleteKey deletes a key from section.
-func (s *Section) DeleteKey(name string) {
- if s.f.BlockMode {
- s.f.lock.Lock()
- defer s.f.lock.Unlock()
- }
-
- for i, k := range s.keyList {
- if k == name {
- s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
- delete(s.keys, name)
- delete(s.keysHash, name)
- return
- }
- }
-}
-
-// ChildSections returns a list of child sections of current section.
-// For example, "[parent.child1]" and "[parent.child12]" are child sections
-// of section "[parent]".
-func (s *Section) ChildSections() []*Section {
- prefix := s.name + s.f.options.ChildSectionDelimiter
- children := make([]*Section, 0, 3)
- for _, name := range s.f.sectionList {
- if strings.HasPrefix(name, prefix) {
- children = append(children, s.f.sections[name]...)
- }
- }
- return children
-}
diff --git a/tools/vendor/gopkg.in/ini.v1/struct.go b/tools/vendor/gopkg.in/ini.v1/struct.go
deleted file mode 100644
index a486b2fe0..000000000
--- a/tools/vendor/gopkg.in/ini.v1/struct.go
+++ /dev/null
@@ -1,747 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "bytes"
- "errors"
- "fmt"
- "reflect"
- "strings"
- "time"
- "unicode"
-)
-
-// NameMapper represents an ini tag name mapper.
-type NameMapper func(string) string
-
-// Built-in name getters.
-var (
- // SnackCase converts to the SNACK_CASE format.
- SnackCase NameMapper = func(raw string) string {
- newstr := make([]rune, 0, len(raw))
- for i, chr := range raw {
- if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
- if i > 0 {
- newstr = append(newstr, '_')
- }
- }
- newstr = append(newstr, unicode.ToUpper(chr))
- }
- return string(newstr)
- }
- // TitleUnderscore converts to the title_underscore format.
- TitleUnderscore NameMapper = func(raw string) string {
- newstr := make([]rune, 0, len(raw))
- for i, chr := range raw {
- if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
- if i > 0 {
- newstr = append(newstr, '_')
- }
- chr -= 'A' - 'a'
- }
- newstr = append(newstr, chr)
- }
- return string(newstr)
- }
-)
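A NameMapper translates struct field names into key names whenever no explicit `ini` tag is present; SnackCase above turns HostName into HOST_NAME. A sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

type Config struct {
	HostName string
	Port     int
}

func main() {
	cfg, _ := ini.Load([]byte("HOST_NAME = example.com\nPORT = 8080\n"))
	cfg.NameMapper = ini.SnackCase
	var c Config
	_ = cfg.MapTo(&c)
	fmt.Println(c.HostName, c.Port)
	// example.com 8080
}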
-
-func (s *Section) parseFieldName(raw, actual string) string {
- if len(actual) > 0 {
- return actual
- }
- if s.f.NameMapper != nil {
- return s.f.NameMapper(raw)
- }
- return raw
-}
-
-func parseDelim(actual string) string {
- if len(actual) > 0 {
- return actual
- }
- return ","
-}
-
-var reflectTime = reflect.TypeOf(time.Now()).Kind()
-
-// setSliceWithProperType sets proper values to slice based on its type.
-func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
- var strs []string
- if allowShadow {
- strs = key.StringsWithShadows(delim)
- } else {
- strs = key.Strings(delim)
- }
-
- numVals := len(strs)
- if numVals == 0 {
- return nil
- }
-
- var vals interface{}
- var err error
-
- sliceOf := field.Type().Elem().Kind()
- switch sliceOf {
- case reflect.String:
- vals = strs
- case reflect.Int:
- vals, err = key.parseInts(strs, true, false)
- case reflect.Int64:
- vals, err = key.parseInt64s(strs, true, false)
- case reflect.Uint:
- vals, err = key.parseUints(strs, true, false)
- case reflect.Uint64:
- vals, err = key.parseUint64s(strs, true, false)
- case reflect.Float64:
- vals, err = key.parseFloat64s(strs, true, false)
- case reflect.Bool:
- vals, err = key.parseBools(strs, true, false)
- case reflectTime:
- vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
- default:
- return fmt.Errorf("unsupported type '[]%s'", sliceOf)
- }
- if err != nil && isStrict {
- return err
- }
-
- slice := reflect.MakeSlice(field.Type(), numVals, numVals)
- for i := 0; i < numVals; i++ {
- switch sliceOf {
- case reflect.String:
- slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
- case reflect.Int:
- slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
- case reflect.Int64:
- slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
- case reflect.Uint:
- slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
- case reflect.Uint64:
- slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
- case reflect.Float64:
- slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
- case reflect.Bool:
- slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i]))
- case reflectTime:
- slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
- }
- }
- field.Set(slice)
- return nil
-}
-
-func wrapStrictError(err error, isStrict bool) error {
- if isStrict {
- return err
- }
- return nil
-}
-
-// setWithProperType sets the proper value to the field based on its type,
-// but it does not return an error for a failed parse,
-// because we want to use the default value that is already assigned to the struct.
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
- vt := t
- isPtr := t.Kind() == reflect.Ptr
- if isPtr {
- vt = t.Elem()
- }
- switch vt.Kind() {
- case reflect.String:
- stringVal := key.String()
- if isPtr {
- field.Set(reflect.ValueOf(&stringVal))
- } else if len(stringVal) > 0 {
- field.SetString(key.String())
- }
- case reflect.Bool:
- boolVal, err := key.Bool()
- if err != nil {
- return wrapStrictError(err, isStrict)
- }
- if isPtr {
- field.Set(reflect.ValueOf(&boolVal))
- } else {
- field.SetBool(boolVal)
- }
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- // ParseDuration will not return err for `0`, so check the type name
- if vt.Name() == "Duration" {
- durationVal, err := key.Duration()
- if err != nil {
- if intVal, err := key.Int64(); err == nil {
- field.SetInt(intVal)
- return nil
- }
- return wrapStrictError(err, isStrict)
- }
- if isPtr {
- field.Set(reflect.ValueOf(&durationVal))
- } else if int64(durationVal) > 0 {
- field.Set(reflect.ValueOf(durationVal))
- }
- return nil
- }
-
- intVal, err := key.Int64()
- if err != nil {
- return wrapStrictError(err, isStrict)
- }
- if isPtr {
- pv := reflect.New(t.Elem())
- pv.Elem().SetInt(intVal)
- field.Set(pv)
- } else {
- field.SetInt(intVal)
- }
- // byte is an alias for uint8, so supporting uint8 breaks support for byte
- case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- durationVal, err := key.Duration()
- // Skip zero value
- if err == nil && uint64(durationVal) > 0 {
- if isPtr {
- field.Set(reflect.ValueOf(&durationVal))
- } else {
- field.Set(reflect.ValueOf(durationVal))
- }
- return nil
- }
-
- uintVal, err := key.Uint64()
- if err != nil {
- return wrapStrictError(err, isStrict)
- }
- if isPtr {
- pv := reflect.New(t.Elem())
- pv.Elem().SetUint(uintVal)
- field.Set(pv)
- } else {
- field.SetUint(uintVal)
- }
-
- case reflect.Float32, reflect.Float64:
- floatVal, err := key.Float64()
- if err != nil {
- return wrapStrictError(err, isStrict)
- }
- if isPtr {
- pv := reflect.New(t.Elem())
- pv.Elem().SetFloat(floatVal)
- field.Set(pv)
- } else {
- field.SetFloat(floatVal)
- }
- case reflectTime:
- timeVal, err := key.Time()
- if err != nil {
- return wrapStrictError(err, isStrict)
- }
- if isPtr {
- field.Set(reflect.ValueOf(&timeVal))
- } else {
- field.Set(reflect.ValueOf(timeVal))
- }
- case reflect.Slice:
- return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
- default:
- return fmt.Errorf("unsupported type %q", t)
- }
- return nil
-}
-
-func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) {
- opts := strings.SplitN(tag, ",", 5)
- rawName = opts[0]
- for _, opt := range opts[1:] {
- omitEmpty = omitEmpty || (opt == "omitempty")
- allowShadow = allowShadow || (opt == "allowshadow")
- allowNonUnique = allowNonUnique || (opt == "nonunique")
- extends = extends || (opt == "extends")
- }
- return rawName, omitEmpty, allowShadow, allowNonUnique, extends
-}
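The recognized `ini` struct tag shape is the key name followed by up to four comma-separated options; a declarative sketch (type and field names are hypothetical, but the tag options are the ones parsed above):

package config

type Mirror struct {
	Host string `ini:"host"`
}

type Config struct {
	Name    string   `ini:"name,omitempty"`    // omitted on reflection when empty
	Allow   []string `ini:"allow,allowshadow"` // read shadow values of repeated keys
	Mirrors []Mirror `ini:"mirror,nonunique"`  // map every [mirror] section into the slice
	Secret  string   `ini:"-"`                 // never mapped or reflected
}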
-
-// mapToField maps the given value to the matching field of the given section.
-// The sectionIndex is the index (if non-unique sections are enabled) to which the value should be added.
-func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error {
- if val.Kind() == reflect.Ptr {
- val = val.Elem()
- }
- typ := val.Type()
-
- for i := 0; i < typ.NumField(); i++ {
- field := val.Field(i)
- tpField := typ.Field(i)
-
- tag := tpField.Tag.Get("ini")
- if tag == "-" {
- continue
- }
-
- rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
- fieldName := s.parseFieldName(tpField.Name, rawName)
- if len(fieldName) == 0 || !field.CanSet() {
- continue
- }
-
- isStruct := tpField.Type.Kind() == reflect.Struct
- isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
- isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
- if isAnonymousPtr {
- field.Set(reflect.New(tpField.Type.Elem()))
- }
-
- if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) {
- if isStructPtr && field.IsNil() {
- field.Set(reflect.New(tpField.Type.Elem()))
- }
- fieldSection := s
- if rawName != "" {
- sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName
- if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) {
- fieldSection = secs[sectionIndex]
- }
- }
- if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil {
- return fmt.Errorf("map to field %q: %v", fieldName, err)
- }
- } else if isAnonymousPtr || isStruct || isStructPtr {
- if secs, err := s.f.SectionsByName(fieldName); err == nil {
- if len(secs) <= sectionIndex {
- return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
- }
- // Only set the field to non-nil struct value if we have a section for it.
- // Otherwise, we end up with a non-nil struct ptr even though there is no data.
- if isStructPtr && field.IsNil() {
- field.Set(reflect.New(tpField.Type.Elem()))
- }
- if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil {
- return fmt.Errorf("map to field %q: %v", fieldName, err)
- }
- continue
- }
- }
-
- // Map non-unique sections
- if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
- newField, err := s.mapToSlice(fieldName, field, isStrict)
- if err != nil {
- return fmt.Errorf("map to slice %q: %v", fieldName, err)
- }
-
- field.Set(newField)
- continue
- }
-
- if key, err := s.GetKey(fieldName); err == nil {
- delim := parseDelim(tpField.Tag.Get("delim"))
- if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
- return fmt.Errorf("set field %q: %v", fieldName, err)
- }
- }
- }
- return nil
-}
-
-// mapToSlice maps all sections with the same name and returns the new value.
-// The type of the Value must be a slice.
-func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) {
- secs, err := s.f.SectionsByName(secName)
- if err != nil {
- return reflect.Value{}, err
- }
-
- typ := val.Type().Elem()
- for i, sec := range secs {
- elem := reflect.New(typ)
- if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil {
- return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
- }
-
- val = reflect.Append(val, elem.Elem())
- }
- return val, nil
-}
-
-// mapTo maps a section to object v.
-func (s *Section) mapTo(v interface{}, isStrict bool) error {
- typ := reflect.TypeOf(v)
- val := reflect.ValueOf(v)
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- val = val.Elem()
- } else {
- return errors.New("not a pointer to a struct")
- }
-
- if typ.Kind() == reflect.Slice {
- newField, err := s.mapToSlice(s.name, val, isStrict)
- if err != nil {
- return err
- }
-
- val.Set(newField)
- return nil
- }
-
- return s.mapToField(val, isStrict, 0, s.name)
-}
-
-// MapTo maps section to given struct.
-func (s *Section) MapTo(v interface{}) error {
- return s.mapTo(v, false)
-}
-
-// StrictMapTo maps the section to the given struct in strict mode,
-// which returns all possible errors including value parsing errors.
-func (s *Section) StrictMapTo(v interface{}) error {
- return s.mapTo(v, true)
-}
-
-// MapTo maps file to given struct.
-func (f *File) MapTo(v interface{}) error {
- return f.Section("").MapTo(v)
-}
-
-// StrictMapTo maps the file to the given struct in strict mode,
-// which returns all possible errors including value parsing errors.
-func (f *File) StrictMapTo(v interface{}) error {
- return f.Section("").StrictMapTo(v)
-}
-
-// MapToWithMapper maps data sources to given struct with name mapper.
-func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
- cfg, err := Load(source, others...)
- if err != nil {
- return err
- }
- cfg.NameMapper = mapper
- return cfg.MapTo(v)
-}
-
-// StrictMapToWithMapper maps data sources to the given struct with a name mapper in strict mode,
-// which returns all possible errors including value parsing errors.
-func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
- cfg, err := Load(source, others...)
- if err != nil {
- return err
- }
- cfg.NameMapper = mapper
- return cfg.StrictMapTo(v)
-}
-
-// MapTo maps data sources to given struct.
-func MapTo(v, source interface{}, others ...interface{}) error {
- return MapToWithMapper(v, nil, source, others...)
-}
-
-// StrictMapTo maps data sources to the given struct in strict mode,
-// which returns all possible errors including value parsing errors.
-func StrictMapTo(v, source interface{}, others ...interface{}) error {
- return StrictMapToWithMapper(v, nil, source, others...)
-}
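End to end, the package-level MapTo loads the sources and maps the default section onto a struct, with nested struct fields drawn from sections of the same (tagged) name. A sketch:

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

type Database struct {
	Host string
	Port int
}

type Config struct {
	AppMode  string   `ini:"app_mode"`
	Database Database `ini:"database"`
}

func main() {
	src := []byte("app_mode = production\n\n[database]\nHost = 127.0.0.1\nPort = 5432\n")
	var c Config
	if err := ini.MapTo(&c, src); err != nil {
		panic(err)
	}
	fmt.Println(c.AppMode, c.Database.Host, c.Database.Port)
	// production 127.0.0.1 5432
}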
-
-// reflectSliceWithProperType does the opposite of setSliceWithProperType.
-func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
- slice := field.Slice(0, field.Len())
- if field.Len() == 0 {
- return nil
- }
- sliceOf := field.Type().Elem().Kind()
-
- if allowShadow {
- var keyWithShadows *Key
- for i := 0; i < field.Len(); i++ {
- var val string
- switch sliceOf {
- case reflect.String:
- val = slice.Index(i).String()
- case reflect.Int, reflect.Int64:
- val = fmt.Sprint(slice.Index(i).Int())
- case reflect.Uint, reflect.Uint64:
- val = fmt.Sprint(slice.Index(i).Uint())
- case reflect.Float64:
- val = fmt.Sprint(slice.Index(i).Float())
- case reflect.Bool:
- val = fmt.Sprint(slice.Index(i).Bool())
- case reflectTime:
- val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
- default:
- return fmt.Errorf("unsupported type '[]%s'", sliceOf)
- }
-
- if i == 0 {
- keyWithShadows = newKey(key.s, key.name, val)
- } else {
- _ = keyWithShadows.AddShadow(val)
- }
- }
- *key = *keyWithShadows
- return nil
- }
-
- var buf bytes.Buffer
- for i := 0; i < field.Len(); i++ {
- switch sliceOf {
- case reflect.String:
- buf.WriteString(slice.Index(i).String())
- case reflect.Int, reflect.Int64:
- buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
- case reflect.Uint, reflect.Uint64:
- buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
- case reflect.Float64:
- buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
- case reflect.Bool:
- buf.WriteString(fmt.Sprint(slice.Index(i).Bool()))
- case reflectTime:
- buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
- default:
- return fmt.Errorf("unsupported type '[]%s'", sliceOf)
- }
- buf.WriteString(delim)
- }
- key.SetValue(buf.String()[:buf.Len()-len(delim)])
- return nil
-}
-
-// reflectWithProperType does the opposite thing as setWithProperType.
-func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
- switch t.Kind() {
- case reflect.String:
- key.SetValue(field.String())
- case reflect.Bool:
- key.SetValue(fmt.Sprint(field.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- key.SetValue(fmt.Sprint(field.Int()))
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- key.SetValue(fmt.Sprint(field.Uint()))
- case reflect.Float32, reflect.Float64:
- key.SetValue(fmt.Sprint(field.Float()))
- case reflectTime:
- key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
- case reflect.Slice:
- return reflectSliceWithProperType(key, field, delim, allowShadow)
- case reflect.Ptr:
- if !field.IsNil() {
- return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
- }
- default:
- return fmt.Errorf("unsupported type %q", t)
- }
- return nil
-}
-
-// CR: copied from encoding/json/encode.go with modifications of time.Time support.
-// TODO: add more test coverage.
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- case reflectTime:
- t, ok := v.Interface().(time.Time)
- return ok && t.IsZero()
- }
- return false
-}
-
-// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
-type StructReflector interface {
- ReflectINIStruct(*File) error
-}
-
-func (s *Section) reflectFrom(val reflect.Value) error {
- if val.Kind() == reflect.Ptr {
- val = val.Elem()
- }
- typ := val.Type()
-
- for i := 0; i < typ.NumField(); i++ {
- if !val.Field(i).CanInterface() {
- continue
- }
-
- field := val.Field(i)
- tpField := typ.Field(i)
-
- tag := tpField.Tag.Get("ini")
- if tag == "-" {
- continue
- }
-
- rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
- if omitEmpty && isEmptyValue(field) {
- continue
- }
-
- if r, ok := field.Interface().(StructReflector); ok {
- return r.ReflectINIStruct(s.f)
- }
-
- fieldName := s.parseFieldName(tpField.Name, rawName)
- if len(fieldName) == 0 || !field.CanSet() {
- continue
- }
-
- if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) {
- if err := s.reflectFrom(field); err != nil {
- return fmt.Errorf("reflect from field %q: %v", fieldName, err)
- }
- continue
- }
-
- if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) ||
- (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
- // Note: The only error here is section doesn't exist.
- sec, err := s.f.GetSection(fieldName)
- if err != nil {
- // Note: fieldName can never be empty here, ignore error.
- sec, _ = s.f.NewSection(fieldName)
- }
-
- // Add comment from comment tag
- if len(sec.Comment) == 0 {
- sec.Comment = tpField.Tag.Get("comment")
- }
-
- if err = sec.reflectFrom(field); err != nil {
- return fmt.Errorf("reflect from field %q: %v", fieldName, err)
- }
- continue
- }
-
- if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
- slice := field.Slice(0, field.Len())
- if field.Len() == 0 {
- return nil
- }
- sliceOf := field.Type().Elem().Kind()
-
- for i := 0; i < field.Len(); i++ {
- if sliceOf != reflect.Struct && sliceOf != reflect.Ptr {
- return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName)
- }
-
- sec, err := s.f.NewSection(fieldName)
- if err != nil {
- return err
- }
-
- // Add comment from comment tag
- if len(sec.Comment) == 0 {
- sec.Comment = tpField.Tag.Get("comment")
- }
-
- if err := sec.reflectFrom(slice.Index(i)); err != nil {
- return fmt.Errorf("reflect from field %q: %v", fieldName, err)
- }
- }
- continue
- }
-
- // Note: Same reason as section.
- key, err := s.GetKey(fieldName)
- if err != nil {
- key, _ = s.NewKey(fieldName, "")
- }
-
- // Add comment from comment tag
- if len(key.Comment) == 0 {
- key.Comment = tpField.Tag.Get("comment")
- }
-
- delim := parseDelim(tpField.Tag.Get("delim"))
- if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
- return fmt.Errorf("reflect field %q: %v", fieldName, err)
- }
-
- }
- return nil
-}
-
-// ReflectFrom reflects section from given struct. It overwrites existing ones.
-func (s *Section) ReflectFrom(v interface{}) error {
- typ := reflect.TypeOf(v)
- val := reflect.ValueOf(v)
-
- if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
- (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
- // Clear sections to make sure none exists before adding the new ones
- s.f.DeleteSection(s.name)
-
- if typ.Kind() == reflect.Ptr {
- sec, err := s.f.NewSection(s.name)
- if err != nil {
- return err
- }
- return sec.reflectFrom(val.Elem())
- }
-
- slice := val.Slice(0, val.Len())
- sliceOf := val.Type().Elem().Kind()
- if sliceOf != reflect.Ptr {
- return fmt.Errorf("not a slice of pointers")
- }
-
- for i := 0; i < slice.Len(); i++ {
- sec, err := s.f.NewSection(s.name)
- if err != nil {
- return err
- }
-
- err = sec.reflectFrom(slice.Index(i))
- if err != nil {
- return fmt.Errorf("reflect from %dth field: %v", i, err)
- }
- }
-
- return nil
- }
-
- if typ.Kind() == reflect.Ptr {
- val = val.Elem()
- } else {
- return errors.New("not a pointer to a struct")
- }
-
- return s.reflectFrom(val)
-}
-
-// ReflectFrom reflects file from given struct.
-func (f *File) ReflectFrom(v interface{}) error {
- return f.Section("").ReflectFrom(v)
-}
-
-// ReflectFromWithMapper reflects data sources from given struct with name mapper.
-func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
- cfg.NameMapper = mapper
- return cfg.ReflectFrom(v)
-}
-
-// ReflectFrom reflects data sources from given struct.
-func ReflectFrom(cfg *File, v interface{}) error {
- return ReflectFromWithMapper(cfg, v, nil)
-}
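
For context, the file deleted above is go-ini's struct (un)marshalling layer. A minimal sketch of how this mapping API is typically used by consumers of gopkg.in/ini.v1 (the Config struct and app.ini path are hypothetical):

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

type Config struct {
	Name string   `ini:"name"`
	Port int      `ini:"port"`
	Tags []string `ini:"tags" delim:","`
}

func main() {
	var cfg Config
	// MapTo tolerates value-parsing errors; StrictMapTo would surface them.
	if err := ini.MapTo(&cfg, "app.ini"); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)

	// ReflectFrom goes the other way: struct -> INI.
	f := ini.Empty()
	if err := ini.ReflectFrom(f, &cfg); err != nil {
		panic(err)
	}
	_ = f.SaveTo("app.out.ini")
}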
diff --git a/tools/vendor/helm.sh/helm/v3/internal/resolver/resolver.go b/tools/vendor/helm.sh/helm/v3/internal/resolver/resolver.go
index 5e8921f96..b6f45da9e 100644
--- a/tools/vendor/helm.sh/helm/v3/internal/resolver/resolver.go
+++ b/tools/vendor/helm.sh/helm/v3/internal/resolver/resolver.go
@@ -77,7 +77,6 @@ func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string
continue
}
if strings.HasPrefix(d.Repository, "file://") {
-
chartpath, err := GetLocalPath(d.Repository, r.chartpath)
if err != nil {
return nil, err
@@ -95,7 +94,7 @@ func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string
}
if !constraint.Check(v) {
- missing = append(missing, d.Name)
+ missing = append(missing, fmt.Sprintf("%q (repository %q, version %q)", d.Name, d.Repository, d.Version))
continue
}
@@ -173,7 +172,7 @@ func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string
Repository: d.Repository,
Version: version,
}
- // The version are already sorted and hence the first one to satisfy the constraint is used
+ // The versions are already sorted and hence the first one to satisfy the constraint is used
for _, ver := range vs {
v, err := semver.NewVersion(ver.Version)
// OCI does not need URLs
@@ -189,11 +188,11 @@ func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string
}
if !found {
- missing = append(missing, d.Name)
+ missing = append(missing, fmt.Sprintf("%q (repository %q, version %q)", d.Name, d.Repository, d.Version))
}
}
if len(missing) > 0 {
- return nil, errors.Errorf("can't get a valid version for repositories %s. Try changing the version constraint in Chart.yaml", strings.Join(missing, ", "))
+ return nil, errors.Errorf("can't get a valid version for %d subchart(s): %s. Make sure a matching chart version exists in the repo, or change the version constraint in Chart.yaml", len(missing), strings.Join(missing, ", "))
}
digest, err := HashReq(reqs, locked)
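
The resolver hunks above report each unresolved dependency with its repository and version constraint instead of the bare chart name. A stand-alone sketch of the resulting message, with all values hypothetical:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// One hypothetical unresolved subchart, formatted the way the new code formats it.
	missing := []string{fmt.Sprintf("%q (repository %q, version %q)",
		"mydb", "https://charts.example.com", ">=10.0.0")}
	err := fmt.Errorf("can't get a valid version for %d subchart(s): %s. "+
		"Make sure a matching chart version exists in the repo, or change "+
		"the version constraint in Chart.yaml",
		len(missing), strings.Join(missing, ", "))
	fmt.Println(err)
}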
diff --git a/tools/vendor/helm.sh/helm/v3/internal/sympath/walk.go b/tools/vendor/helm.sh/helm/v3/internal/sympath/walk.go
index a276cfeff..6b221fb6c 100644
--- a/tools/vendor/helm.sh/helm/v3/internal/sympath/walk.go
+++ b/tools/vendor/helm.sh/helm/v3/internal/sympath/walk.go
@@ -71,6 +71,7 @@ func symwalk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
if err != nil {
return errors.Wrapf(err, "error evaluating symlink %s", path)
}
+	// This log message highlights a symlink being used within a chart; symlinks can be used for nefarious reasons.
log.Printf("found symbolic link in path: %s resolves to %s. Contents of linked file included and used", path, resolved)
if info, err = os.Lstat(resolved); err != nil {
return err
diff --git a/tools/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go b/tools/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go
index 4e4eacc60..d29bb5f87 100644
--- a/tools/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go
+++ b/tools/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go
@@ -260,7 +260,7 @@ func fixLongPath(path string) string {
// minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
//
// The MSDN docs appear to say that a normal path that is 248 bytes long
- // will work; empirically the path must be less then 248 bytes long.
+ // will work; empirically the path must be less than 248 bytes long.
if len(path) < 248 {
// Don't fix. (This is how Go 1.7 and earlier worked,
// not automatically generating the \\?\ form)
diff --git a/tools/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go b/tools/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go
index dc832ed80..7cd1dace9 100644
--- a/tools/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go
+++ b/tools/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go
@@ -65,7 +65,7 @@ func CertPoolFromFile(filename string) (*x509.CertPool, error) {
return cp, nil
}
-// CertFromFilePair returns an tls.Certificate containing the
+// CertFromFilePair returns a tls.Certificate containing the
// certificates public/private key pair from a pair of given PEM-encoded files.
// Returns an error if the file could not be read, a certificate could not
// be parsed, or if the file does not contain any certificates
diff --git a/tools/vendor/helm.sh/helm/v3/internal/version/version.go b/tools/vendor/helm.sh/helm/v3/internal/version/version.go
index d20f25897..539659f5e 100644
--- a/tools/vendor/helm.sh/helm/v3/internal/version/version.go
+++ b/tools/vendor/helm.sh/helm/v3/internal/version/version.go
@@ -29,7 +29,7 @@ var (
//
// Increment major number for new feature additions and behavioral changes.
// Increment minor number for bug fixes and performance enhancements.
- version = "v3.15"
+ version = "v3.16"
// metadata is extra build time data
metadata = ""
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/action.go b/tools/vendor/helm.sh/helm/v3/pkg/action/action.go
index 863c48f07..fe91de048 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/action/action.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/action/action.go
@@ -165,7 +165,7 @@ func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Valu
// Sort hooks, manifests, and partials. Only hooks and manifests are returned,
// as partials are not used after renderer.Render. Empty manifests are also
// removed here.
- hs, manifests, err := releaseutil.SortManifests(files, caps.APIVersions, releaseutil.InstallOrder)
+ hs, manifests, err := releaseutil.SortManifests(files, nil, releaseutil.InstallOrder)
if err != nil {
// By catching parse errors here, we can prevent bogus releases from going
// to Kubernetes.
@@ -330,7 +330,7 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version
}
versionMap := make(map[string]interface{})
- versions := []string{}
+ var versions []string
// Extract the groups
for _, g := range groups {
@@ -411,12 +411,11 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
namespace,
)
if err != nil {
- panic(fmt.Sprintf("Unable to instantiate SQL driver: %v", err))
+ return errors.Wrap(err, "unable to instantiate SQL driver")
}
store = storage.Init(d)
default:
- // Not sure what to do here.
- panic("Unknown driver in HELM_DRIVER: " + helmDriver)
+ return errors.Errorf("unknown driver %q", helmDriver)
}
cfg.RESTClientGetter = getter
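
Since Configuration.Init now returns an error instead of panicking on a bad HELM_DRIVER, callers can handle the failure. A sketch under that assumption, with a deliberately invalid driver name:

package main

import (
	"fmt"
	"os"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/cli"
)

func main() {
	settings := cli.New()
	cfg := new(action.Configuration)
	// "bogus" is not a recognized driver; this used to panic inside Init.
	err := cfg.Init(settings.RESTClientGetter(), "default", "bogus",
		func(format string, v ...interface{}) { fmt.Printf(format+"\n", v...) })
	if err != nil {
		fmt.Fprintln(os.Stderr, "init failed:", err)
	}
}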
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/hooks.go b/tools/vendor/helm.sh/helm/v3/pkg/action/hooks.go
index 0af625dff..4bffb6ae0 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/action/hooks.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/action/hooks.go
@@ -99,7 +99,8 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
// If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted
// under succeeded condition. If so, then clear the corresponding resource object in each hook
- for _, h := range executingHooks {
+ for i := len(executingHooks) - 1; i >= 0; i-- {
+ h := executingHooks[i]
if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil {
return err
}
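
The cleanup loop above now walks the hooks backwards, so hooks are deleted in the opposite order of their execution. The iteration pattern in isolation, with hypothetical hook names:

package main

import "fmt"

func main() {
	executingHooks := []string{"job-1", "job-2", "job-3"} // hypothetical hook names
	// Delete in reverse execution order: the last hook run is cleaned up first.
	for i := len(executingHooks) - 1; i >= 0; i-- {
		fmt.Println("deleting hook:", executingHooks[i])
	}
}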
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/install.go b/tools/vendor/helm.sh/helm/v3/pkg/action/install.go
index de612e3b7..7ca40c88a 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/action/install.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/action/install.go
@@ -55,7 +55,7 @@ import (
"helm.sh/helm/v3/pkg/storage/driver"
)
-// NOTESFILE_SUFFIX that we want to treat special. It goes through the templating engine
+// notesFileSuffix is a suffix we treat specially. It goes through the templating engine
// but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually
// wants to see this file after rendering in the status command. However, it must be a suffix
// since there can be filepath in front of it.
@@ -93,6 +93,8 @@ type Install struct {
Atomic bool
SkipCRDs bool
SubNotes bool
+ HideNotes bool
+ SkipSchemaValidation bool
DisableOpenAPIValidation bool
IncludeCRDs bool
Labels map[string]string
@@ -108,7 +110,9 @@ type Install struct {
// Used by helm template to add the release as part of OutputDir path
// OutputDir/
UseReleaseName bool
- PostRenderer postrender.PostRenderer
+ // TakeOwnership will ignore the check for helm annotations and take ownership of the resources.
+ TakeOwnership bool
+ PostRenderer postrender.PostRenderer
// Lock to control raceconditions when the process receives a SIGTERM
Lock sync.Mutex
}
@@ -297,13 +301,13 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
IsInstall: !isUpgrade,
IsUpgrade: isUpgrade,
}
- valuesToRender, err := chartutil.ToRenderValues(chrt, vals, options, caps)
+ valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chrt, vals, options, caps, i.SkipSchemaValidation)
if err != nil {
return nil, err
}
if driver.ContainsSystemLabels(i.Labels) {
- return nil, fmt.Errorf("user suplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels())
+ return nil, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels())
}
rel := i.createRelease(chrt, vals, i.Labels)
@@ -343,7 +347,11 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
// deleting the release because the manifest will be pointing at that
// resource
if !i.ClientOnly && !isUpgrade && len(resources) > 0 {
- toBeAdopted, err = existingResourceConflict(resources, rel.Name, rel.Namespace)
+ if i.TakeOwnership {
+ toBeAdopted, err = requireAdoption(resources)
+ } else {
+ toBeAdopted, err = existingResourceConflict(resources, rel.Name, rel.Namespace)
+ }
if err != nil {
return nil, errors.Wrap(err, "Unable to continue with install")
}
@@ -381,7 +389,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
}
}
- // If Replace is true, we need to supercede the last release.
+ // If Replace is true, we need to supersede the last release.
if i.Replace {
if err := i.replaceRelease(rel); err != nil {
return nil, err
@@ -623,7 +631,7 @@ func createOrOpenFile(filename string, append bool) (*os.File, error) {
return os.Create(filename)
}
-// check if the directory exists to create file. creates if don't exists
+// check if the directory exists to create file. creates if doesn't exist
func ensureDirectoryForFile(file string) error {
baseDir := path.Dir(file)
_, err := os.Stat(baseDir)
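
Taken together, the install.go changes introduce three opt-in knobs on the Install action. A compile-level sketch of setting them (release name hypothetical, Configuration left uninitialized for brevity):

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/action"
)

func main() {
	client := action.NewInstall(&action.Configuration{})
	client.ReleaseName = "demo"        // hypothetical release name
	client.HideNotes = true            // suppress NOTES.txt output
	client.SkipSchemaValidation = true // bypass values.schema.json validation
	client.TakeOwnership = true        // adopt existing resources regardless of helm annotations
	fmt.Println("configured install for release:", client.ReleaseName)
}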
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/lint.go b/tools/vendor/helm.sh/helm/v3/pkg/action/lint.go
index ca497f2b8..63a1bf354 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/action/lint.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/action/lint.go
@@ -32,11 +32,12 @@ import (
//
// It provides the implementation of 'helm lint'.
type Lint struct {
- Strict bool
- Namespace string
- WithSubcharts bool
- Quiet bool
- KubeVersion *chartutil.KubeVersion
+ Strict bool
+ Namespace string
+ WithSubcharts bool
+ Quiet bool
+ SkipSchemaValidation bool
+ KubeVersion *chartutil.KubeVersion
}
// LintResult is the result of Lint
@@ -59,7 +60,7 @@ func (l *Lint) Run(paths []string, vals map[string]interface{}) *LintResult {
}
result := &LintResult{}
for _, path := range paths {
- linter, err := lintChart(path, vals, l.Namespace, l.KubeVersion)
+ linter, err := lintChart(path, vals, l.Namespace, l.KubeVersion, l.SkipSchemaValidation)
if err != nil {
result.Errors = append(result.Errors, err)
continue
@@ -86,7 +87,7 @@ func HasWarningsOrErrors(result *LintResult) bool {
return len(result.Errors) > 0
}
-func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) (support.Linter, error) {
+func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion, skipSchemaValidation bool) (support.Linter, error) {
var chartPath string
linter := support.Linter{}
@@ -125,5 +126,5 @@ func lintChart(path string, vals map[string]interface{}, namespace string, kubeV
return linter, errors.Wrap(err, "unable to check Chart.yaml file in chart")
}
- return lint.AllWithKubeVersion(chartPath, vals, namespace, kubeVersion), nil
+ return lint.AllWithKubeVersionAndSchemaValidation(chartPath, vals, namespace, kubeVersion, skipSchemaValidation), nil
}
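
The new SkipSchemaValidation field threads through to the linter. A small sketch of using it from the action API (chart path hypothetical):

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/action"
)

func main() {
	l := action.NewLint()
	l.SkipSchemaValidation = true // lint without enforcing values.schema.json
	result := l.Run([]string{"./mychart"}, map[string]interface{}{})
	fmt.Printf("%d message(s), %d error(s)\n", len(result.Messages), len(result.Errors))
}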
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/release_testing.go b/tools/vendor/helm.sh/helm/v3/pkg/action/release_testing.go
index 3c10cecf8..aaffe47ca 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/action/release_testing.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/action/release_testing.go
@@ -44,6 +44,7 @@ type ReleaseTesting struct {
// Used for fetching logs from test pods
Namespace string
Filters map[string][]string
+ HideNotes bool
}
// NewReleaseTesting creates a new ReleaseTesting object with the given configuration.
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/uninstall.go b/tools/vendor/helm.sh/helm/v3/pkg/action/uninstall.go
index 40d82243e..ac0c4fee8 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/action/uninstall.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/action/uninstall.go
@@ -196,13 +196,9 @@ func joinErrors(errs []error) string {
// deleteRelease deletes the release and returns list of delete resources and manifests that were kept in the deletion process
func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, string, []error) {
var errs []error
- caps, err := u.cfg.getCapabilities()
- if err != nil {
- return nil, rel.Manifest, []error{errors.Wrap(err, "could not get apiVersions from Kubernetes")}
- }
manifests := releaseutil.SplitManifests(rel.Manifest)
- _, files, err := releaseutil.SortManifests(manifests, caps.APIVersions, releaseutil.UninstallOrder)
+ _, files, err := releaseutil.SortManifests(manifests, nil, releaseutil.UninstallOrder)
if err != nil {
// We could instead just delete everything in no particular order.
// FIXME: One way to delete at this point would be to try a label-based
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/upgrade.go b/tools/vendor/helm.sh/helm/v3/pkg/action/upgrade.go
index 2bd40a850..a08d68495 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/action/upgrade.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/action/upgrade.go
@@ -97,6 +97,10 @@ type Upgrade struct {
CleanupOnFail bool
// SubNotes determines whether sub-notes are rendered in the chart.
SubNotes bool
+ // HideNotes determines whether notes are output during upgrade
+ HideNotes bool
+ // SkipSchemaValidation determines if JSON schema validation is disabled.
+ SkipSchemaValidation bool
// Description is the description of this operation
Description string
Labels map[string]string
@@ -113,6 +117,8 @@ type Upgrade struct {
Lock sync.Mutex
// Enable DNS lookups when rendering templates
EnableDNS bool
+ // TakeOwnership will skip the check for helm annotations and adopt all existing resources.
+ TakeOwnership bool
}
type resultMessage struct {
@@ -256,7 +262,7 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin
if err != nil {
return nil, nil, err
}
- valuesToRender, err := chartutil.ToRenderValues(chart, vals, options, caps)
+ valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chart, vals, options, caps, u.SkipSchemaValidation)
if err != nil {
return nil, nil, err
}
@@ -273,7 +279,7 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin
}
if driver.ContainsSystemLabels(u.Labels) {
- return nil, nil, fmt.Errorf("user suplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels())
+ return nil, nil, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels())
}
// Store an upgraded release.
@@ -337,7 +343,12 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
}
}
- toBeUpdated, err := existingResourceConflict(toBeCreated, upgradedRelease.Name, upgradedRelease.Namespace)
+ var toBeUpdated kube.ResourceList
+ if u.TakeOwnership {
+ toBeUpdated, err = requireAdoption(toBeCreated)
+ } else {
+ toBeUpdated, err = existingResourceConflict(toBeCreated, upgradedRelease.Name, upgradedRelease.Namespace)
+ }
if err != nil {
return nil, errors.Wrap(err, "Unable to continue with update")
}
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/validate.go b/tools/vendor/helm.sh/helm/v3/pkg/action/validate.go
index 73eb1937b..127e9bf96 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/action/validate.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/action/validate.go
@@ -37,6 +37,31 @@ const (
helmReleaseNamespaceAnnotation = "meta.helm.sh/release-namespace"
)
+// requireAdoption returns the subset of resources that already exist in the cluster.
+func requireAdoption(resources kube.ResourceList) (kube.ResourceList, error) {
+ var requireUpdate kube.ResourceList
+
+ err := resources.Visit(func(info *resource.Info, err error) error {
+ if err != nil {
+ return err
+ }
+
+ helper := resource.NewHelper(info.Client, info.Mapping)
+ _, err = helper.Get(info.Namespace, info.Name)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ return nil
+ }
+ return errors.Wrapf(err, "could not get information about the resource %s", resourceString(info))
+ }
+
+ requireUpdate.Append(info)
+ return nil
+ })
+
+ return requireUpdate, err
+}
+
func existingResourceConflict(resources kube.ResourceList, releaseName, releaseNamespace string) (kube.ResourceList, error) {
var requireUpdate kube.ResourceList
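
requireAdoption mirrors existingResourceConflict's Visit-based walk, but keeps every resource that already exists instead of treating it as a conflict. A toy sketch of the filtering logic, with a map standing in for the resource.Helper.Get lookup:

package main

import "fmt"

func main() {
	// Hypothetical cluster state standing in for resource.Helper.Get.
	live := map[string]bool{"deployment/web": true}
	toCreate := []string{"deployment/web", "service/web"}

	var adopt []string
	for _, name := range toCreate {
		if !live[name] {
			continue // not found: will be created, not adopted
		}
		adopt = append(adopt, name) // already exists: take ownership
	}
	fmt.Println("to adopt:", adopt) // [deployment/web]
}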
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go b/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go
index 196e5f81d..8bb549346 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go
@@ -101,7 +101,7 @@ func ensureArchive(name string, raw *os.File) error {
return nil
}
-// isGZipApplication checks whether the achieve is of the application/x-gzip type.
+// isGZipApplication checks whether the archive is of the application/x-gzip type.
func isGZipApplication(data []byte) bool {
sig := []byte("\x1F\x8B\x08")
return bytes.HasPrefix(data, sig)
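
For reference, the three signature bytes are the gzip magic number (0x1F 0x8B) followed by the deflate compression-method byte (0x08). A quick self-contained check against a real gzip stream:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, _ = zw.Write([]byte("hello"))
	_ = zw.Close()

	sig := []byte("\x1F\x8B\x08")
	fmt.Println(bytes.HasPrefix(buf.Bytes(), sig)) // true
}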
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go
index 5f57e11a5..48fab0ea4 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go
@@ -53,7 +53,7 @@ var (
type Capabilities struct {
// KubeVersion is the Kubernetes version.
KubeVersion KubeVersion
- // APIversions are supported Kubernetes API versions.
+ // APIVersions are supported Kubernetes API versions.
APIVersions VersionSet
// HelmVersion is the build information for this helm version
HelmVersion helmversion.BuildInfo
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/create.go b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/create.go
index 50212f9d5..e9769932f 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/create.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/create.go
@@ -106,18 +106,24 @@ const defaultValues = `# Default values for %s.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
+# This sets the replica count; more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
+# This sets the container image; more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
image:
repository: nginx
+ # This sets the pull policy for images.
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
+# This is for the secrets used to pull an image from a private repository; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
+# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""
+# This section builds out the service account; more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
@@ -129,7 +135,11 @@ serviceAccount:
# If not set and create is true, a name is generated using the fullname template
name: ""
+# This is for setting Kubernetes Annotations to a Pod.
+# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
+# This is for setting Kubernetes Labels to a Pod.
+# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
@@ -143,10 +153,14 @@ securityContext: {}
# runAsNonRoot: true
# runAsUser: 1000
+# This is for setting up a service; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
+  # This sets the service type; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
+  # This sets the ports; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 80
+# This block is for setting up the ingress; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
className: ""
@@ -175,6 +189,7 @@ resources: {}
# cpu: 100m
# memory: 128Mi
+# This is to set up the liveness and readiness probes; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
httpGet:
path: /
@@ -184,6 +199,7 @@ readinessProbe:
path: /
port: http
+# This section is for setting up autoscaling; more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
@@ -237,23 +253,10 @@ const defaultIgnore = `# Patterns to ignore when building packages.
`
const defaultIngress = `{{- if .Values.ingress.enabled -}}
-{{- $fullName := include ".fullname" . -}}
-{{- $svcPort := .Values.service.port -}}
-{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
- {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
- {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
- {{- end }}
-{{- end }}
-{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
-{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
-apiVersion: networking.k8s.io/v1beta1
-{{- else -}}
-apiVersion: extensions/v1beta1
-{{- end }}
kind: Ingress
metadata:
- name: {{ $fullName }}
+ name: {{ include ".fullname" . }}
labels:
{{- include ".labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
@@ -261,8 +264,8 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
- {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
- ingressClassName: {{ .Values.ingress.className }}
+ {{- with .Values.ingress.className }}
+ ingressClassName: {{ . }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
@@ -281,19 +284,14 @@ spec:
paths:
{{- range .paths }}
- path: {{ .path }}
- {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
- pathType: {{ .pathType }}
+ {{- with .pathType }}
+ pathType: {{ . }}
{{- end }}
backend:
- {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
- name: {{ $fullName }}
+ name: {{ include ".fullname" $ }}
port:
- number: {{ $svcPort }}
- {{- else }}
- serviceName: {{ $fullName }}
- servicePort: {{ $svcPort }}
- {{- end }}
+ number: {{ $.Values.service.port }}
{{- end }}
{{- end }}
{{- end }}
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
index 205d99e09..36a341927 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
@@ -137,7 +137,7 @@ func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path str
// If any dependency is not a part of Chart.yaml
// then this should be added to chartDependencies.
// However, if the dependency is already specified in Chart.yaml
- // we should not add it, as it would be anyways processed from Chart.yaml
+ // we should not add it, as it would be processed from Chart.yaml anyway.
Loop:
for _, existing := range c.Dependencies() {
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/values.go b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/values.go
index 2fa2bdabb..61c633a6d 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/values.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/values.go
@@ -135,6 +135,13 @@ type ReleaseOptions struct {
//
// This takes both ReleaseOptions and Capabilities to merge into the render values.
func ToRenderValues(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities) (Values, error) {
+ return ToRenderValuesWithSchemaValidation(chrt, chrtVals, options, caps, false)
+}
+
+// ToRenderValuesWithSchemaValidation composes the struct from the data coming from the Releases, Charts and Values files
+//
+// This takes both ReleaseOptions and Capabilities to merge into the render values, and can optionally skip schema validation.
+func ToRenderValuesWithSchemaValidation(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities, skipSchemaValidation bool) (Values, error) {
if caps == nil {
caps = DefaultCapabilities
}
@@ -156,9 +163,11 @@ func ToRenderValues(chrt *chart.Chart, chrtVals map[string]interface{}, options
return top, err
}
- if err := ValidateAgainstSchema(chrt, vals); err != nil {
- errFmt := "values don't meet the specifications of the schema(s) in the following chart(s):\n%s"
- return top, fmt.Errorf(errFmt, err.Error())
+ if !skipSchemaValidation {
+ if err := ValidateAgainstSchema(chrt, vals); err != nil {
+ errFmt := "values don't meet the specifications of the schema(s) in the following chart(s):\n%s"
+ return top, fmt.Errorf(errFmt, err.Error())
+ }
}
top["Values"] = vals
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/cli/environment.go b/tools/vendor/helm.sh/helm/v3/pkg/cli/environment.go
index 438ba1515..0f28c61fd 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/cli/environment.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/cli/environment.go
@@ -156,7 +156,7 @@ func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) {
fs.BoolVar(&s.Debug, "debug", s.Debug, "enable verbose output")
fs.StringVar(&s.RegistryConfig, "registry-config", s.RegistryConfig, "path to the registry config file")
fs.StringVar(&s.RepositoryConfig, "repository-config", s.RepositoryConfig, "path to the file containing repository names and URLs")
- fs.StringVar(&s.RepositoryCache, "repository-cache", s.RepositoryCache, "path to the file containing cached repository indexes")
+ fs.StringVar(&s.RepositoryCache, "repository-cache", s.RepositoryCache, "path to the directory containing cached repository indexes")
fs.IntVar(&s.BurstLimit, "burst-limit", s.BurstLimit, "client-side default throttling limit")
fs.Float32Var(&s.QPS, "qps", s.QPS, "queries per second used when communicating with the Kubernetes API, not including bursting")
}
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/downloader/manager.go b/tools/vendor/helm.sh/helm/v3/pkg/downloader/manager.go
index d5340575d..ec4056d27 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/downloader/manager.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/downloader/manager.go
@@ -173,7 +173,7 @@ func (m *Manager) Update() error {
// has some information about them and, when possible, the index files
// locally.
// TODO(mattfarina): Repositories should be explicitly added by end users
- // rather than automattic. In Helm v4 require users to add repositories. They
+	// rather than automatically. In Helm v4, require users to add repositories. They
// should have to add them in order to make sure they are aware of the
// repositories and opt-in to any locations, for security.
repoNames, err = m.ensureMissingRepos(repoNames, req)
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/engine/engine.go b/tools/vendor/helm.sh/helm/v3/pkg/engine/engine.go
index 058cfa749..df3a600a3 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/engine/engine.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/engine/engine.go
@@ -169,7 +169,7 @@ func tplFun(parent *template.Template, includedNames map[string]int, strict bool
})
// We need a .New template, as template text which is just blanks
- // or comments after parsing out defines just addes new named
+ // or comments after parsing out defines just adds new named
// template definitions without changing the main template.
// https://pkg.go.dev/text/template#Template.Parse
// Use the parent's name for lack of a better way to identify the tpl
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go b/tools/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go
index 86a7d698c..75e85098d 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go
@@ -131,7 +131,7 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met
return res, err
}
for _, resource := range resList.APIResources {
- // if a resource contains a "/" it's referencing a subresource. we don't support suberesource for now.
+		// if a resource contains a "/" it's referencing a subresource. we don't support subresources for now.
if resource.Kind == gvk.Kind && !strings.Contains(resource.Name, "/") {
res = resource
res.Group = gvk.Group
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go b/tools/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go
index 22d7bf0a1..6b4f1fc77 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go
@@ -34,7 +34,7 @@ const (
DataHomeEnvVar = "HELM_DATA_HOME"
)
-// lazypath is an lazy-loaded path buffer for the XDG base directory specification.
+// lazypath is a lazy-loaded path buffer for the XDG base directory specification.
type lazypath string
func (l lazypath) path(helmEnvVar, xdgEnvVar string, defaultFn func() string, elem ...string) string {
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/ignore/doc.go b/tools/vendor/helm.sh/helm/v3/pkg/ignore/doc.go
index 5245d410e..1f5e91847 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/ignore/doc.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/ignore/doc.go
@@ -26,7 +26,7 @@ The formatting rules are as follows:
- Parsing is line-by-line
- Empty lines are ignored
- - Lines the begin with # (comments) will be ignored
+ - Lines that begin with # (comments) will be ignored
- Leading and trailing spaces are always ignored
- Inline comments are NOT supported ('foo* # Any foo' does not contain a comment)
- There is no support for multi-line patterns
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/kube/client.go b/tools/vendor/helm.sh/helm/v3/pkg/kube/client.go
index 9df833a43..d979fd22c 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/kube/client.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/kube/client.go
@@ -124,7 +124,7 @@ func (c *Client) getKubeClient() (*kubernetes.Clientset, error) {
func (c *Client) IsReachable() error {
client, err := c.getKubeClient()
if err == genericclioptions.ErrEmptyConfig {
- // re-replace kubernetes ErrEmptyConfig error with a friendy error
+ // re-replace kubernetes ErrEmptyConfig error with a friendly error
// moar workarounds for Kubernetes API breaking.
return errors.New("Kubernetes cluster unreachable")
}
@@ -635,7 +635,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P
// Get a versioned object
versionedObject := AsVersioned(target)
- // Unstructured objects, such as CRDs, may not have an not registered error
+	// Unstructured objects, such as CRDs, may not have a "not registered" error
// returned from ConvertToVersion. Anything that's unstructured should
// use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported
// on objects like CRDs.
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/kube/ready.go b/tools/vendor/helm.sh/helm/v3/pkg/kube/ready.go
index b2d26ba76..55c4a39bf 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/kube/ready.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/kube/ready.go
@@ -426,7 +426,7 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
return false
}
// This check only makes sense when all partitions are being upgraded otherwise during a
- // partioned rolling upgrade, this condition will never evaluate to true, leading to
+ // partitioned rolling upgrade, this condition will never evaluate to true, leading to
// error.
if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision)
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/kube/resource.go b/tools/vendor/helm.sh/helm/v3/pkg/kube/resource.go
index ee8f83a25..d441db8a7 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/kube/resource.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/kube/resource.go
@@ -26,7 +26,7 @@ func (r *ResourceList) Append(val *resource.Info) {
*r = append(*r, val)
}
-// Visit implements resource.Visitor.
+// Visit implements resource.Visitor. The visitor stops if fn returns an error.
func (r ResourceList) Visit(fn resource.VisitorFunc) error {
for _, i := range r {
if err := fn(i, nil); err != nil {
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/lint/lint.go b/tools/vendor/helm.sh/helm/v3/pkg/lint/lint.go
index c0e79f55b..ef23ee7c8 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/lint/lint.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/lint/lint.go
@@ -24,20 +24,25 @@ import (
"helm.sh/helm/v3/pkg/lint/support"
)
-// All runs all of the available linters on the given base directory.
+// All runs all the available linters on the given base directory.
func All(basedir string, values map[string]interface{}, namespace string, _ bool) support.Linter {
return AllWithKubeVersion(basedir, values, namespace, nil)
}
// AllWithKubeVersion runs all the available linters on the given base directory, allowing to specify the kubernetes version.
func AllWithKubeVersion(basedir string, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) support.Linter {
+ return AllWithKubeVersionAndSchemaValidation(basedir, values, namespace, kubeVersion, false)
+}
+
+// AllWithKubeVersionAndSchemaValidation runs all the available linters on the given base directory, allowing the Kubernetes version to be specified and schema validation to be toggled.
+func AllWithKubeVersionAndSchemaValidation(basedir string, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion, skipSchemaValidation bool) support.Linter {
// Using abs path to get directory context
chartDir, _ := filepath.Abs(basedir)
linter := support.Linter{ChartDir: chartDir}
rules.Chartfile(&linter)
rules.ValuesWithOverrides(&linter, values)
- rules.TemplatesWithKubeVersion(&linter, values, namespace, kubeVersion)
+ rules.TemplatesWithSkipSchemaValidation(&linter, values, namespace, kubeVersion, skipSchemaValidation)
rules.Dependencies(&linter)
return linter
}
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go b/tools/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go
index 661c7f963..41d1a1bab 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go
@@ -51,6 +51,11 @@ func Templates(linter *support.Linter, values map[string]interface{}, namespace
// TemplatesWithKubeVersion lints the templates in the Linter, allowing to specify the kubernetes version.
func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) {
+ TemplatesWithSkipSchemaValidation(linter, values, namespace, kubeVersion, false)
+}
+
+// TemplatesWithSkipSchemaValidation lints the templates in the Linter, allowing the Kubernetes version to be specified and schema validation to be toggled.
+func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion, skipSchemaValidation bool) {
fpath := "templates/"
templatesPath := filepath.Join(linter.ChartDir, fpath)
@@ -91,7 +96,7 @@ func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interfac
return
}
- valuesToRender, err := chartutil.ToRenderValues(chart, cvals, options, caps)
+ valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chart, cvals, options, caps, skipSchemaValidation)
if err != nil {
linter.RunLinterRule(support.ErrorSev, fpath, err)
return
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/registry/util.go b/tools/vendor/helm.sh/helm/v3/pkg/registry/util.go
index 45fbdd0b5..727cdae03 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/registry/util.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/registry/util.go
@@ -65,8 +65,7 @@ func GetTagMatchingVersionOrConstraint(tags []string, versionString string) (str
// If string is empty, set wildcard constraint
constraint, _ = semver.NewConstraint("*")
} else {
- // when customer input exact version, check whether have exact match
- // one first
+		// when the customer inputs a specific version, check whether there's an exact match first
for _, v := range tags {
if versionString == v {
return v, nil
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/release/status.go b/tools/vendor/helm.sh/helm/v3/pkg/release/status.go
index e0e3ed62a..edd27a5f1 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/release/status.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/release/status.go
@@ -31,13 +31,13 @@ const (
StatusSuperseded Status = "superseded"
// StatusFailed indicates that the release was not successfully deployed.
StatusFailed Status = "failed"
- // StatusUninstalling indicates that a uninstall operation is underway.
+ // StatusUninstalling indicates that an uninstall operation is underway.
StatusUninstalling Status = "uninstalling"
// StatusPendingInstall indicates that an install operation is underway.
StatusPendingInstall Status = "pending-install"
// StatusPendingUpgrade indicates that an upgrade operation is underway.
StatusPendingUpgrade Status = "pending-upgrade"
- // StatusPendingRollback indicates that an rollback operation is underway.
+ // StatusPendingRollback indicates that a rollback operation is underway.
StatusPendingRollback Status = "pending-rollback"
)
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go b/tools/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go
index 413de30e2..4b6109929 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go
@@ -41,7 +41,6 @@ type Manifest struct {
type manifestFile struct {
entries map[string]string
path string
- apis chartutil.VersionSet
}
// result is an intermediate structure used during sorting.
@@ -75,7 +74,7 @@ var events = map[string]release.HookEvent{
//
// Files that do not parse into the expected format are simply placed into a map and
// returned.
-func SortManifests(files map[string]string, apis chartutil.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) {
+func SortManifests(files map[string]string, _ chartutil.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) {
result := &result{}
var sortedFilePaths []string
@@ -100,7 +99,6 @@ func SortManifests(files map[string]string, apis chartutil.VersionSet, ordering
manifestFile := &manifestFile{
entries: SplitManifests(content),
path: filePath,
- apis: apis,
}
if err := manifestFile.sort(result); err != nil {
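
With the apis field gone, the VersionSet parameter of SortManifests is ignored; callers (as in the action package hunks above) now pass nil. A minimal sketch of the call:

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/releaseutil"
)

func main() {
	files := map[string]string{
		"templates/cm.yaml": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo\n",
	}
	// The VersionSet argument is unused now; nil is fine.
	hooks, manifests, err := releaseutil.SortManifests(files, nil, releaseutil.InstallOrder)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(hooks), len(manifests)) // 0 1
}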
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/repo/index.go b/tools/vendor/helm.sh/helm/v3/pkg/repo/index.go
index 40b11c5cf..e1ce3c62d 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/repo/index.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/repo/index.go
@@ -200,7 +200,7 @@ func (i IndexFile) Get(name, version string) (*ChartVersion, error) {
}
}
- // when customer input exact version, check whether have exact match one first
+	// when the customer inputs a specific version, check whether there's an exact match first
if len(version) != 0 {
for _, ver := range vs {
if version == ver.Version {
@@ -371,6 +371,8 @@ func loadIndex(data []byte, source string) (*IndexFile, error) {
cvs = append(cvs[:idx], cvs[idx+1:]...)
}
}
+		// adjust the slice to contain only the valid versions
+ i.Entries[name] = cvs
}
i.SortEntries()
if i.APIVersion == "" {
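
The added reassignment matters because append on the local cvs only rebinds that local slice header; the map entry keeps its old length. The gotcha in isolation:

package main

import "fmt"

func main() {
	entries := map[string][]string{"demo": {"1.0.0", "bad", "2.0.0"}}
	cvs := entries["demo"]
	// Drop the invalid element at index 1.
	cvs = append(cvs[:1], cvs[2:]...)
	fmt.Println(entries["demo"]) // still length 3: [1.0.0 2.0.0 2.0.0]
	entries["demo"] = cvs        // the fix: store the shortened slice back
	fmt.Println(entries["demo"]) // [1.0.0 2.0.0]
}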
@@ -397,7 +399,7 @@ func jsonOrYamlUnmarshal(b []byte, i interface{}) error {
// the error isn't important for index loading
//
// In particular, charts may introduce validations that don't impact repository indexes
-// And repository indexes may be generated by older/non-complient software, which doesn't
+// And repository indexes may be generated by older/non-compliant software, which doesn't
// conform to all validations.
func ignoreSkippableChartValidationError(err error) error {
verr, ok := err.(chart.ValidationError)
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go b/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
index 2ef951184..33bde9b6a 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
@@ -72,8 +72,8 @@ const (
// Following limits based on k8s labels limits - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
const (
- sqlCustomLabelsTableKeyMaxLenght = 253 + 1 + 63
- sqlCustomLabelsTableValueMaxLenght = 63
+ sqlCustomLabelsTableKeyMaxLength = 253 + 1 + 63
+ sqlCustomLabelsTableValueMaxLength = 63
)
const (
@@ -119,7 +119,7 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
}
}
- // check if all migrations appliyed
+ // check if all migrations applied
if len(migrationsIDs) != 0 {
for id := range migrationsIDs {
s.Log("checkAlreadyApplied: find unapplied migration (id: %v)", id)
@@ -204,7 +204,7 @@ func (s *SQL) ensureDBSetup() error {
CREATE TABLE %s (
%s VARCHAR(64),
%s VARCHAR(67),
- %s VARCHAR(%d),
+ %s VARCHAR(%d),
%s VARCHAR(%d)
);
CREATE INDEX ON %s (%s, %s);
@@ -216,9 +216,9 @@ func (s *SQL) ensureDBSetup() error {
sqlCustomLabelsTableReleaseKeyColumn,
sqlCustomLabelsTableReleaseNamespaceColumn,
sqlCustomLabelsTableKeyColumn,
- sqlCustomLabelsTableKeyMaxLenght,
+ sqlCustomLabelsTableKeyMaxLength,
sqlCustomLabelsTableValueColumn,
- sqlCustomLabelsTableValueMaxLenght,
+ sqlCustomLabelsTableValueMaxLength,
sqlCustomLabelsTableName,
sqlCustomLabelsTableReleaseKeyColumn,
sqlCustomLabelsTableReleaseNamespaceColumn,
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/time/time.go b/tools/vendor/helm.sh/helm/v3/pkg/time/time.go
index 44f3fedfb..1abe8ae3d 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/time/time.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/time/time.go
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Package time contains a wrapper for time.Time in the standard library and
-// associated methods. This package mainly exists to workaround an issue in Go
+// associated methods. This package mainly exists to work around an issue in Go
// where the serializer doesn't omit an empty value for time:
// https://github.com/golang/go/issues/11939. As such, this can be removed if a
// proposal is ever accepted for Go
diff --git a/tools/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go b/tools/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go
index d43b0c25e..adc496feb 100644
--- a/tools/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go
+++ b/tools/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go
@@ -21,29 +21,40 @@ import (
)
// NewSimpleFakeResourceFinder builds a super simple ResourceFinder that just iterates over the objects you provided
-func NewSimpleFakeResourceFinder(infos ...*resource.Info) ResourceFinder {
- return &fakeResourceFinder{
+func NewSimpleFakeResourceFinder(infos ...*resource.Info) *FakeResourceFinder {
+ return &FakeResourceFinder{
Infos: infos,
}
}
-type fakeResourceFinder struct {
+func (f *FakeResourceFinder) WithError(err error) *FakeResourceFinder {
+ f.err = err
+ return f
+}
+
+type FakeResourceFinder struct {
Infos []*resource.Info
+ err error
}
// Do implements the interface
-func (f *fakeResourceFinder) Do() resource.Visitor {
+func (f *FakeResourceFinder) Do() resource.Visitor {
return &fakeResourceResult{
Infos: f.Infos,
+ err: f.err,
}
}
type fakeResourceResult struct {
Infos []*resource.Info
+ err error
}
// Visit just iterates over info
func (r *fakeResourceResult) Visit(fn resource.VisitorFunc) error {
+ if r.err != nil {
+ return r.err
+ }
for _, info := range r.Infos {
err := fn(info, nil)
if err != nil {
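
The fake finder is now exported and can be primed with an error, which Visit returns before touching any infos. A small usage sketch:

package main

import (
	"errors"
	"fmt"

	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	finder := genericclioptions.NewSimpleFakeResourceFinder().WithError(errors.New("boom"))
	err := finder.Do().Visit(nil) // the visitor func is never invoked
	fmt.Println(err)              // boom
}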
diff --git a/tools/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go b/tools/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go
index a5f307de5..e3cee3473 100644
--- a/tools/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go
+++ b/tools/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go
@@ -21,9 +21,9 @@ import (
"path/filepath"
"strings"
- jsonpatch "github.com/evanphx/json-patch"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
+ jsonpatch "gopkg.in/evanphx/json-patch.v4"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
diff --git a/tools/vendor/k8s.io/cli-runtime/pkg/printers/json.go b/tools/vendor/k8s.io/cli-runtime/pkg/printers/json.go
index 8ab2235f8..7d14a4e5a 100644
--- a/tools/vendor/k8s.io/cli-runtime/pkg/printers/json.go
+++ b/tools/vendor/k8s.io/cli-runtime/pkg/printers/json.go
@@ -19,6 +19,7 @@ package printers
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"io"
"reflect"
@@ -36,13 +37,13 @@ func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
// we need an actual value in order to retrieve the package path for an object.
// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
- return fmt.Errorf(InternalObjectPrinterErr)
+ return errors.New(InternalObjectPrinterErr)
}
switch obj := obj.(type) {
case *metav1.WatchEvent:
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) {
- return fmt.Errorf(InternalObjectPrinterErr)
+ return errors.New(InternalObjectPrinterErr)
}
data, err := json.Marshal(obj)
if err != nil {
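
This and the following printer hunks all apply one mechanical fix: a message-only fmt.Errorf becomes errors.New, so a stray %-verb in the message can never be mis-expanded and go vet's printf check stays quiet. In isolation:

package main

import (
	"errors"
	"fmt"
)

func main() {
	const msg = "message with a stray %s verb"
	fmt.Println(fmt.Errorf(msg)) // %s has no operand: prints "%!s(MISSING)"
	fmt.Println(errors.New(msg)) // treats the message as opaque text
}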
diff --git a/tools/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go b/tools/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go
index 769960d66..216449ec4 100644
--- a/tools/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go
+++ b/tools/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go
@@ -19,6 +19,7 @@ package printers
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"io"
"reflect"
@@ -119,7 +120,7 @@ func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
// we need an actual value in order to retrieve the package path for an object.
// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
- return fmt.Errorf(InternalObjectPrinterErr)
+ return errors.New(InternalObjectPrinterErr)
}
var queryObj interface{} = obj
diff --git a/tools/vendor/k8s.io/cli-runtime/pkg/printers/name.go b/tools/vendor/k8s.io/cli-runtime/pkg/printers/name.go
index 086166af2..1d2fe7f96 100644
--- a/tools/vendor/k8s.io/cli-runtime/pkg/printers/name.go
+++ b/tools/vendor/k8s.io/cli-runtime/pkg/printers/name.go
@@ -17,6 +17,7 @@ limitations under the License.
package printers
import (
+ "errors"
"fmt"
"io"
"reflect"
@@ -52,7 +53,7 @@ func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error {
// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
// we need an actual value in order to retrieve the package path for an object.
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
- return fmt.Errorf(InternalObjectPrinterErr)
+ return errors.New(InternalObjectPrinterErr)
}
if meta.IsListType(obj) {
diff --git a/tools/vendor/k8s.io/cli-runtime/pkg/printers/template.go b/tools/vendor/k8s.io/cli-runtime/pkg/printers/template.go
index ccff54226..4b08573ce 100644
--- a/tools/vendor/k8s.io/cli-runtime/pkg/printers/template.go
+++ b/tools/vendor/k8s.io/cli-runtime/pkg/printers/template.go
@@ -18,6 +18,7 @@ package printers
import (
"encoding/base64"
+ "errors"
"fmt"
"io"
"reflect"
@@ -61,7 +62,7 @@ func (p *GoTemplatePrinter) AllowMissingKeys(allow bool) {
// PrintObj formats the obj with the Go Template.
func (p *GoTemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error {
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
- return fmt.Errorf(InternalObjectPrinterErr)
+ return errors.New(InternalObjectPrinterErr)
}
var data []byte
diff --git a/tools/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go b/tools/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go
index 9c444bdc2..8c6be82fe 100644
--- a/tools/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go
+++ b/tools/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go
@@ -17,6 +17,7 @@ limitations under the License.
package printers
import (
+ "errors"
"fmt"
"io"
"reflect"
@@ -42,7 +43,7 @@ func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
// we need an actual value in order to retrieve the package path for an object.
// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
- return fmt.Errorf(InternalObjectPrinterErr)
+ return errors.New(InternalObjectPrinterErr)
}
count := atomic.AddInt64(&p.printCount, 1)
@@ -55,7 +56,7 @@ func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
switch obj := obj.(type) {
case *metav1.WatchEvent:
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) {
- return fmt.Errorf(InternalObjectPrinterErr)
+ return errors.New(InternalObjectPrinterErr)
}
data, err := yaml.Marshal(obj)
if err != nil {
diff --git a/tools/vendor/k8s.io/cli-runtime/pkg/resource/builder.go b/tools/vendor/k8s.io/cli-runtime/pkg/resource/builder.go
index 47ec83bbb..5f8fdcd9b 100644
--- a/tools/vendor/k8s.io/cli-runtime/pkg/resource/builder.go
+++ b/tools/vendor/k8s.io/cli-runtime/pkg/resource/builder.go
@@ -1030,7 +1030,7 @@ func (b *Builder) visitByResource() *Result {
if b.allNamespace {
errMsg = "a resource cannot be retrieved by name across all namespaces"
}
- return result.withError(fmt.Errorf(errMsg))
+ return result.withError(errors.New(errMsg))
}
}
@@ -1093,7 +1093,7 @@ func (b *Builder) visitByName() *Result {
if b.allNamespace {
errMsg = "a resource cannot be retrieved by name across all namespaces"
}
- return result.withError(fmt.Errorf(errMsg))
+ return result.withError(errors.New(errMsg))
}
}
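These printer and builder hunks all make the same substitution: a no-argument fmt.Errorf(msg) becomes errors.New(msg), so a non-constant message is never treated as a format string (recent go vet printf checks also flag the old form). A minimal sketch of the failure mode the change avoids; the message text here is made up for illustration:

package main

import (
	"errors"
	"fmt"
)

func main() {
	msg := "disk usage at 100%" // a message that happens to end in '%'
	// fmt.Errorf treats msg as a format string, so the stray '%' is parsed
	// as the start of a verb and the output gains "%!(NOVERB)" noise.
	fmt.Println(fmt.Errorf(msg))
	// errors.New stores the string verbatim, which is all these call sites
	// need since they never pass format arguments.
	fmt.Println(errors.New(msg))
}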
diff --git a/tools/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go b/tools/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
index 2218b9f5b..11eaf61a1 100644
--- a/tools/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
+++ b/tools/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
@@ -27,9 +27,9 @@ import (
"strings"
"time"
- jsonpatch "github.com/evanphx/json-patch"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
+ jsonpatch "gopkg.in/evanphx/json-patch.v4"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -243,7 +243,7 @@ func statusCausesToAggrError(scs []metav1.StatusCause) utilerrors.Aggregate {
// commands.
func StandardErrorMessage(err error) (string, bool) {
if debugErr, ok := err.(debugError); ok {
- klog.V(4).Infof(debugErr.DebugError())
+ klog.V(4).Info(debugErr.DebugError())
}
status, isStatus := err.(apierrors.APIStatus)
switch {
diff --git a/tools/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po b/tools/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po
index 7036ad59a..f7280b4fe 100644
--- a/tools/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po
+++ b/tools/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po
@@ -432,7 +432,7 @@ msgstr "Create a service account with the specified name"
# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38
#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42
msgid "Delete the specified cluster from the kubeconfig"
-msgstr "指定したコンテキストをkubeconfigから削除する"
+msgstr "指定したクラスターをkubeconfigから削除する"
# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38
#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42
diff --git a/tools/vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go b/tools/vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go
index fdfdf08ee..b7e1bf00f 100644
--- a/tools/vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go
+++ b/tools/vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go
@@ -62,7 +62,7 @@ func (p *HelpFlagPrinter) PrintHelpFlag(flag *flag.Flag) {
}
appendTabStr := strings.ReplaceAll(wrappedStr, "\n", "\n\t")
- fmt.Fprintf(p.out, appendTabStr+"\n\n")
+ fmt.Fprint(p.out, appendTabStr+"\n\n")
}
// writeFlag will output the help flag based
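The klog.V(4).Infof to Info and fmt.Fprintf to fmt.Fprint changes above are the same class of fix: with no variadic arguments, the non-formatting variants write the string verbatim instead of interpreting it. A short sketch with a hypothetical flag description:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Wrapped help text that happens to contain '%', as a flag description might.
	wrapped := "--threshold: fire the alert at 90% capacity"
	// fmt.Fprintf would parse the '%' as a verb; Fprint writes the bytes
	// as-is, which is what PrintHelpFlag intends.
	fmt.Fprint(os.Stdout, wrapped+"\n\n")
}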
diff --git a/tools/vendor/modules.txt b/tools/vendor/modules.txt
index 4f90ff244..25987f3ed 100644
--- a/tools/vendor/modules.txt
+++ b/tools/vendor/modules.txt
@@ -1,6 +1,9 @@
# cel.dev/expr v0.18.0
## explicit; go 1.21.1
cel.dev/expr
+# dario.cat/mergo v1.0.1
+## explicit; go 1.13
+dario.cat/mergo
# github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
## explicit; go 1.20
github.com/AdaLogics/go-fuzz-headers
@@ -18,11 +21,11 @@ github.com/MakeNowJust/heredoc
# github.com/Masterminds/goutils v1.1.1
## explicit
github.com/Masterminds/goutils
-# github.com/Masterminds/semver/v3 v3.2.1
-## explicit; go 1.18
+# github.com/Masterminds/semver/v3 v3.3.0
+## explicit; go 1.21
github.com/Masterminds/semver/v3
-# github.com/Masterminds/sprig/v3 v3.2.3
-## explicit; go 1.13
+# github.com/Masterminds/sprig/v3 v3.3.0
+## explicit; go 1.21
github.com/Masterminds/sprig/v3
# github.com/Masterminds/squirrel v1.5.4
## explicit; go 1.14
@@ -37,8 +40,8 @@ github.com/Microsoft/go-winio/internal/stringbuffer
github.com/Microsoft/go-winio/pkg/bindfilter
github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/vhd
-# github.com/Microsoft/hcsshim v0.12.5
-## explicit; go 1.21
+# github.com/Microsoft/hcsshim v0.12.9
+## explicit; go 1.22
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/computestorage
github.com/Microsoft/hcsshim/internal/cow
@@ -92,11 +95,8 @@ github.com/chai2010/gettext-go/po
# github.com/containerd/cgroups/v3 v3.0.3
## explicit; go 1.18
github.com/containerd/cgroups/v3/cgroup1/stats
-# github.com/containerd/containerd v1.7.18
+# github.com/containerd/containerd v1.7.27
## explicit; go 1.21
-github.com/containerd/containerd/api/events
-github.com/containerd/containerd/api/runtime/sandbox/v1
-github.com/containerd/containerd/api/types
github.com/containerd/containerd/archive
github.com/containerd/containerd/archive/compression
github.com/containerd/containerd/archive/tarheader
@@ -120,14 +120,11 @@ github.com/containerd/containerd/namespaces
github.com/containerd/containerd/pkg/cleanup
github.com/containerd/containerd/pkg/epoch
github.com/containerd/containerd/pkg/randutil
-github.com/containerd/containerd/pkg/userns
github.com/containerd/containerd/platforms
github.com/containerd/containerd/protobuf
-github.com/containerd/containerd/protobuf/plugin
github.com/containerd/containerd/protobuf/proto
github.com/containerd/containerd/protobuf/types
github.com/containerd/containerd/reference
-github.com/containerd/containerd/reference/docker
github.com/containerd/containerd/remotes
github.com/containerd/containerd/remotes/docker
github.com/containerd/containerd/remotes/docker/auth
@@ -137,25 +134,39 @@ github.com/containerd/containerd/sandbox
github.com/containerd/containerd/snapshots
github.com/containerd/containerd/tracing
github.com/containerd/containerd/version
-# github.com/containerd/continuity v0.4.2
-## explicit; go 1.19
+# github.com/containerd/containerd/api v1.8.0
+## explicit; go 1.21
+github.com/containerd/containerd/api/events
+github.com/containerd/containerd/api/runtime/sandbox/v1
+github.com/containerd/containerd/api/types
+# github.com/containerd/continuity v0.4.4
+## explicit; go 1.21
+github.com/containerd/continuity/devices
github.com/containerd/continuity/fs
github.com/containerd/continuity/sysx
-# github.com/containerd/errdefs v0.1.0
+# github.com/containerd/errdefs v0.3.0
## explicit; go 1.20
github.com/containerd/errdefs
+# github.com/containerd/errdefs/pkg v0.3.0
+## explicit; go 1.22
+github.com/containerd/errdefs/pkg/errgrpc
+github.com/containerd/errdefs/pkg/internal/cause
+github.com/containerd/errdefs/pkg/internal/types
# github.com/containerd/log v0.1.0
## explicit; go 1.20
github.com/containerd/log
-# github.com/containerd/stargz-snapshotter/estargz v0.15.1
-## explicit; go 1.19
+# github.com/containerd/platforms v0.2.1
+## explicit; go 1.20
+github.com/containerd/platforms
+# github.com/containerd/stargz-snapshotter/estargz v0.16.3
+## explicit; go 1.22.0
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
-# github.com/containerd/ttrpc v1.2.4
+# github.com/containerd/ttrpc v1.2.7
## explicit; go 1.19
github.com/containerd/ttrpc
-# github.com/containerd/typeurl/v2 v2.1.1
-## explicit; go 1.13
+# github.com/containerd/typeurl/v2 v2.2.0
+## explicit; go 1.21
github.com/containerd/typeurl/v2
# github.com/containers/common v0.60.4
## explicit; go 1.21.0
@@ -205,8 +216,8 @@ github.com/containers/libtrust
# github.com/containers/ocicrypt v1.2.0
## explicit; go 1.21
github.com/containers/ocicrypt/spec
-# github.com/containers/storage v1.55.0
-## explicit; go 1.21
+# github.com/containers/storage v1.57.2
+## explicit; go 1.22.0
github.com/containers/storage/pkg/fileutils
github.com/containers/storage/pkg/homedir
github.com/containers/storage/pkg/idtools
@@ -218,8 +229,8 @@ github.com/containers/storage/pkg/reexec
github.com/containers/storage/pkg/regexp
github.com/containers/storage/pkg/system
github.com/containers/storage/pkg/unshare
-# github.com/cyphar/filepath-securejoin v0.3.1
-## explicit; go 1.20
+# github.com/cyphar/filepath-securejoin v0.3.6
+## explicit; go 1.18
github.com/cyphar/filepath-securejoin
# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
## explicit
@@ -227,7 +238,7 @@ github.com/davecgh/go-spew/spew
# github.com/distribution/reference v0.6.0
## explicit; go 1.20
github.com/distribution/reference
-# github.com/docker/cli v27.2.0+incompatible
+# github.com/docker/cli v27.3.1+incompatible
## explicit
github.com/docker/cli/cli/config
github.com/docker/cli/cli/config/configfile
@@ -270,7 +281,7 @@ github.com/docker/go-units
## explicit; go 1.13
github.com/emicklei/go-restful/v3
github.com/emicklei/go-restful/v3/log
-# github.com/evanphx/json-patch v5.7.0+incompatible
+# github.com/evanphx/json-patch v5.9.0+incompatible
## explicit
github.com/evanphx/json-patch
# github.com/evanphx/json-patch/v5 v5.9.0
@@ -283,15 +294,16 @@ github.com/exponent-io/jsonpath
# github.com/fatih/color v1.18.0
## explicit; go 1.17
github.com/fatih/color
-# github.com/fatih/structtag v1.1.0
+# github.com/fatih/structtag v1.2.0
## explicit; go 1.12
github.com/fatih/structtag
# github.com/felixge/httpsnoop v1.0.4
## explicit; go 1.13
github.com/felixge/httpsnoop
-# github.com/fsnotify/fsnotify v1.7.0
+# github.com/fsnotify/fsnotify v1.8.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
+github.com/fsnotify/fsnotify/internal
# github.com/fxamacker/cbor/v2 v2.7.0
## explicit; go 1.17
github.com/fxamacker/cbor/v2
@@ -304,11 +316,11 @@ github.com/go-git/gcfg
github.com/go-git/gcfg/scanner
github.com/go-git/gcfg/token
github.com/go-git/gcfg/types
-# github.com/go-git/go-billy/v5 v5.5.0
-## explicit; go 1.19
+# github.com/go-git/go-billy/v5 v5.6.1
+## explicit; go 1.21
github.com/go-git/go-billy/v5
-# github.com/go-git/go-git/v5 v5.11.0
-## explicit; go 1.19
+# github.com/go-git/go-git/v5 v5.13.1
+## explicit; go 1.21
github.com/go-git/go-git/v5/internal/path_util
github.com/go-git/go-git/v5/plumbing/format/config
github.com/go-git/go-git/v5/plumbing/format/gitignore
@@ -337,6 +349,10 @@ github.com/go-openapi/jsonreference/internal
# github.com/go-openapi/swag v0.23.0
## explicit; go 1.20
github.com/go-openapi/swag
+# github.com/go-viper/mapstructure/v2 v2.2.1
+## explicit; go 1.18
+github.com/go-viper/mapstructure/v2
+github.com/go-viper/mapstructure/v2/internal/errors
# github.com/gobuffalo/envy v1.6.5
## explicit
github.com/gobuffalo/envy
@@ -357,8 +373,8 @@ github.com/gobwas/glob/util/strings
## explicit; go 1.15
github.com/gogo/protobuf/proto
github.com/gogo/protobuf/sortkeys
-# github.com/golang-migrate/migrate/v4 v4.17.1
-## explicit; go 1.18
+# github.com/golang-migrate/migrate/v4 v4.18.1
+## explicit; go 1.22.0
github.com/golang-migrate/migrate/v4/source
github.com/golang-migrate/migrate/v4/source/file
github.com/golang-migrate/migrate/v4/source/iofs
@@ -409,7 +425,7 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.20.0
+# github.com/google/go-containerregistry v0.20.1
## explicit; go 1.18
github.com/google/go-containerregistry/internal/and
github.com/google/go-containerregistry/internal/compression
@@ -463,8 +479,8 @@ github.com/gosuri/uitable/util/wordwrap
# github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
## explicit
github.com/gregjones/httpcache
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0
-## explicit; go 1.20
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0
+## explicit; go 1.21
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -483,38 +499,23 @@ github.com/hashicorp/errwrap
# github.com/hashicorp/go-multierror v1.1.1
## explicit; go 1.13
github.com/hashicorp/go-multierror
-# github.com/hashicorp/hcl v1.0.0
-## explicit
-github.com/hashicorp/hcl
-github.com/hashicorp/hcl/hcl/ast
-github.com/hashicorp/hcl/hcl/parser
-github.com/hashicorp/hcl/hcl/printer
-github.com/hashicorp/hcl/hcl/scanner
-github.com/hashicorp/hcl/hcl/strconv
-github.com/hashicorp/hcl/hcl/token
-github.com/hashicorp/hcl/json/parser
-github.com/hashicorp/hcl/json/scanner
-github.com/hashicorp/hcl/json/token
-# github.com/huandu/xstrings v1.4.0
+# github.com/huandu/xstrings v1.5.0
## explicit; go 1.12
github.com/huandu/xstrings
# github.com/iancoleman/strcase v0.3.0
## explicit; go 1.16
github.com/iancoleman/strcase
-# github.com/imdario/mergo v0.3.16
-## explicit; go 1.13
-github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.1.0
## explicit; go 1.18
github.com/inconshreveable/mousetrap
# github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
## explicit
github.com/jbenet/go-context/io
-# github.com/jmoiron/sqlx v1.3.5
+# github.com/jmoiron/sqlx v1.4.0
## explicit; go 1.10
github.com/jmoiron/sqlx
github.com/jmoiron/sqlx/reflectx
-# github.com/joelanford/ignore v0.1.0
+# github.com/joelanford/ignore v0.1.1
## explicit; go 1.21
github.com/joelanford/ignore
# github.com/joho/godotenv v1.3.0
@@ -526,8 +527,8 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.9
-## explicit; go 1.20
+# github.com/klauspost/compress v1.17.11
+## explicit; go 1.21
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -552,9 +553,6 @@ github.com/lib/pq/scram
# github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
## explicit
github.com/liggitt/tabwriter
-# github.com/magiconair/properties v1.8.7
-## explicit; go 1.19
-github.com/magiconair/properties
# github.com/mailru/easyjson v0.7.7
## explicit; go 1.12
github.com/mailru/easyjson/buffer
@@ -572,7 +570,7 @@ github.com/mattn/go-isatty
# github.com/mattn/go-runewidth v0.0.16
## explicit; go 1.9
github.com/mattn/go-runewidth
-# github.com/mattn/go-sqlite3 v1.14.22
+# github.com/mattn/go-sqlite3 v1.14.24
## explicit; go 1.19
github.com/mattn/go-sqlite3
# github.com/mitchellh/copystructure v1.2.0
@@ -584,9 +582,6 @@ github.com/mitchellh/go-homedir
# github.com/mitchellh/go-wordwrap v1.0.1
## explicit; go 1.14
github.com/mitchellh/go-wordwrap
-# github.com/mitchellh/mapstructure v1.5.0
-## explicit; go 1.14
-github.com/mitchellh/mapstructure
# github.com/mitchellh/reflectwalk v1.0.2
## explicit
github.com/mitchellh/reflectwalk
@@ -597,15 +592,21 @@ github.com/moby/locker
## explicit; go 1.13
github.com/moby/spdystream
github.com/moby/spdystream/spdy
+# github.com/moby/sys/capability v0.4.0
+## explicit; go 1.21
+github.com/moby/sys/capability
# github.com/moby/sys/mountinfo v0.7.2
## explicit; go 1.17
github.com/moby/sys/mountinfo
# github.com/moby/sys/sequential v0.5.0
## explicit; go 1.17
github.com/moby/sys/sequential
-# github.com/moby/sys/user v0.2.0
-## explicit; go 1.21
+# github.com/moby/sys/user v0.3.0
+## explicit; go 1.17
github.com/moby/sys/user
+# github.com/moby/sys/userns v0.1.0
+## explicit; go 1.21
+github.com/moby/sys/userns
# github.com/moby/term v0.5.0
## explicit; go 1.18
github.com/moby/term
@@ -639,8 +640,8 @@ github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/runtime-spec v1.2.0
## explicit
github.com/opencontainers/runtime-spec/specs-go
-# github.com/operator-framework/ansible-operator-plugins v1.36.1
-## explicit; go 1.22.0
+# github.com/operator-framework/ansible-operator-plugins v1.37.2
+## explicit; go 1.23.4
github.com/operator-framework/ansible-operator-plugins/internal/version
github.com/operator-framework/ansible-operator-plugins/pkg/plugins
github.com/operator-framework/ansible-operator-plugins/pkg/plugins/ansible/v1
@@ -656,7 +657,7 @@ github.com/operator-framework/ansible-operator-plugins/pkg/plugins/ansible/v1/sc
github.com/operator-framework/ansible-operator-plugins/pkg/plugins/ansible/v1/scaffolds/internal/templates/playbooks
github.com/operator-framework/ansible-operator-plugins/pkg/plugins/ansible/v1/scaffolds/internal/templates/roles
github.com/operator-framework/ansible-operator-plugins/pkg/plugins/util
-# github.com/operator-framework/api v0.24.0
+# github.com/operator-framework/api v0.27.0
## explicit; go 1.22.0
github.com/operator-framework/api/pkg/apis/scorecard/v1alpha3
github.com/operator-framework/api/pkg/constraints
@@ -671,16 +672,17 @@ github.com/operator-framework/api/pkg/validation
github.com/operator-framework/api/pkg/validation/errors
github.com/operator-framework/api/pkg/validation/interfaces
github.com/operator-framework/api/pkg/validation/internal
-# github.com/operator-framework/operator-manifest-tools v0.6.0
-## explicit; go 1.21
+# github.com/operator-framework/operator-manifest-tools v0.8.0
+## explicit; go 1.22.0
github.com/operator-framework/operator-manifest-tools/internal/utils
github.com/operator-framework/operator-manifest-tools/pkg/image
github.com/operator-framework/operator-manifest-tools/pkg/imagename
github.com/operator-framework/operator-manifest-tools/pkg/imageresolver
github.com/operator-framework/operator-manifest-tools/pkg/pullspec
-# github.com/operator-framework/operator-registry v1.42.0
-## explicit; go 1.22.0
+# github.com/operator-framework/operator-registry v1.49.0
+## explicit; go 1.22.7
github.com/operator-framework/operator-registry/alpha/action
+github.com/operator-framework/operator-registry/alpha/action/migrations
github.com/operator-framework/operator-registry/alpha/declcfg
github.com/operator-framework/operator-registry/alpha/model
github.com/operator-framework/operator-registry/alpha/property
@@ -697,8 +699,8 @@ github.com/operator-framework/operator-registry/pkg/prettyunmarshaler
github.com/operator-framework/operator-registry/pkg/registry
github.com/operator-framework/operator-registry/pkg/sqlite
github.com/operator-framework/operator-registry/pkg/sqlite/migrations
-# github.com/operator-framework/operator-sdk v1.38.0
-## explicit; go 1.22.5
+# github.com/operator-framework/operator-sdk v1.39.2
+## explicit; go 1.23.4
github.com/operator-framework/operator-sdk/cmd/operator-sdk
github.com/operator-framework/operator-sdk/internal/annotations/metrics
github.com/operator-framework/operator-sdk/internal/annotations/scorecard
@@ -764,8 +766,8 @@ github.com/operator-framework/operator-sdk/internal/version
# github.com/otiai10/copy v1.14.0
## explicit; go 1.18
github.com/otiai10/copy
-# github.com/pelletier/go-toml/v2 v2.2.2
-## explicit; go 1.16
+# github.com/pelletier/go-toml/v2 v2.2.3
+## explicit; go 1.21.0
github.com/pelletier/go-toml/v2
github.com/pelletier/go-toml/v2/internal/characters
github.com/pelletier/go-toml/v2/internal/danger
@@ -780,8 +782,8 @@ github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
## explicit
github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.20.5
-## explicit; go 1.20
+# github.com/prometheus/client_golang v1.21.1
+## explicit; go 1.21
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus
@@ -791,8 +793,8 @@ github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.6.1
## explicit; go 1.19
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.55.0
-## explicit; go 1.20
+# github.com/prometheus/common v0.62.0
+## explicit; go 1.21
github.com/prometheus/common/expfmt
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.15.1
@@ -803,24 +805,21 @@ github.com/prometheus/procfs/internal/util
# github.com/rivo/uniseg v0.4.7
## explicit; go 1.18
github.com/rivo/uniseg
-# github.com/rubenv/sql-migrate v1.5.2
-## explicit; go 1.17
+# github.com/rubenv/sql-migrate v1.7.0
+## explicit; go 1.21
github.com/rubenv/sql-migrate
github.com/rubenv/sql-migrate/sqlparse
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
github.com/russross/blackfriday/v2
-# github.com/sagikazarmark/locafero v0.4.0
-## explicit; go 1.20
+# github.com/sagikazarmark/locafero v0.7.0
+## explicit; go 1.21
github.com/sagikazarmark/locafero
-# github.com/sagikazarmark/slog-shim v0.1.0
-## explicit; go 1.20
-github.com/sagikazarmark/slog-shim
-# github.com/sergi/go-diff v1.3.1
-## explicit; go 1.12
-github.com/sergi/go-diff/diffmatchpatch
-# github.com/shopspring/decimal v1.3.1
+# github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3
## explicit; go 1.13
+github.com/sergi/go-diff/diffmatchpatch
+# github.com/shopspring/decimal v1.4.0
+## explicit; go 1.10
github.com/shopspring/decimal
# github.com/sirupsen/logrus v1.9.3
## explicit; go 1.13
@@ -831,12 +830,12 @@ github.com/sourcegraph/conc
github.com/sourcegraph/conc/internal/multierror
github.com/sourcegraph/conc/iter
github.com/sourcegraph/conc/panics
-# github.com/spf13/afero v1.11.0
-## explicit; go 1.19
+# github.com/spf13/afero v1.14.0
+## explicit; go 1.23.0
github.com/spf13/afero
github.com/spf13/afero/internal/common
github.com/spf13/afero/mem
-# github.com/spf13/cast v1.6.0
+# github.com/spf13/cast v1.7.1
## explicit; go 1.19
github.com/spf13/cast
# github.com/spf13/cobra v1.9.1
@@ -845,14 +844,10 @@ github.com/spf13/cobra
# github.com/spf13/pflag v1.0.6
## explicit; go 1.12
github.com/spf13/pflag
-# github.com/spf13/viper v1.19.0
-## explicit; go 1.20
+# github.com/spf13/viper v1.20.0
+## explicit; go 1.21.0
github.com/spf13/viper
-github.com/spf13/viper/internal/encoding
github.com/spf13/viper/internal/encoding/dotenv
-github.com/spf13/viper/internal/encoding/hcl
-github.com/spf13/viper/internal/encoding/ini
-github.com/spf13/viper/internal/encoding/javaproperties
github.com/spf13/viper/internal/encoding/json
github.com/spf13/viper/internal/encoding/toml
github.com/spf13/viper/internal/encoding/yaml
@@ -860,9 +855,10 @@ github.com/spf13/viper/internal/features
# github.com/stoewer/go-strcase v1.3.0
## explicit; go 1.11
github.com/stoewer/go-strcase
-# github.com/stretchr/testify v1.9.0
+# github.com/stretchr/testify v1.10.0
## explicit; go 1.17
github.com/stretchr/testify/assert
+github.com/stretchr/testify/assert/yaml
github.com/stretchr/testify/require
# github.com/subosito/gotenv v1.6.0
## explicit; go 1.18
@@ -870,10 +866,10 @@ github.com/subosito/gotenv
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit
github.com/syndtr/gocapability/capability
-# github.com/thoas/go-funk v0.8.0
+# github.com/thoas/go-funk v0.9.3
## explicit; go 1.13
github.com/thoas/go-funk
-# github.com/vbatts/tar-split v0.11.5
+# github.com/vbatts/tar-split v0.11.7
## explicit; go 1.17
github.com/vbatts/tar-split/archive/tar
# github.com/x448/float16 v0.8.4
@@ -901,13 +897,14 @@ go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
-## explicit; go 1.21
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0
+## explicit; go 1.22
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel v1.32.0
+## explicit; go 1.22
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
go.opentelemetry.io/otel/baggage
@@ -920,33 +917,33 @@ go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.20.0
go.opentelemetry.io/otel/semconv/v1.21.0
-go.opentelemetry.io/otel/semconv/v1.24.0
go.opentelemetry.io/otel/semconv/v1.26.0
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0
+## explicit; go 1.22
go.opentelemetry.io/otel/exporters/otlp/otlptrace
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0
+## explicit; go 1.22
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/metric v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/metric v1.32.0
+## explicit; go 1.22
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
-# go.opentelemetry.io/otel/sdk v1.28.0
-## explicit; go 1.21
+go.opentelemetry.io/otel/metric/noop
+# go.opentelemetry.io/otel/sdk v1.32.0
+## explicit; go 1.22
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/sdk/instrumentation
go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/internal/x
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/trace v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/trace v1.32.0
+## explicit; go 1.22
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/noop
@@ -984,16 +981,12 @@ golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/scrypt
-# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
-## explicit; go 1.20
-golang.org/x/exp/constraints
+# golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67
+## explicit; go 1.22.0
golang.org/x/exp/maps
golang.org/x/exp/slices
-golang.org/x/exp/slog
-golang.org/x/exp/slog/internal
-golang.org/x/exp/slog/internal/buffer
-# golang.org/x/mod v0.23.0
-## explicit; go 1.22.0
+# golang.org/x/mod v0.24.0
+## explicit; go 1.23.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
golang.org/x/mod/module
@@ -1059,8 +1052,8 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.7.0
## explicit; go 1.18
golang.org/x/time/rate
-# golang.org/x/tools v0.30.0
-## explicit; go 1.22.0
+# golang.org/x/tools v0.31.0
+## explicit; go 1.23.0
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/packages
@@ -1083,19 +1076,19 @@ golang.org/x/tools/internal/stdlib
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/internal/versions
-# google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de
-## explicit; go 1.19
+# google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1
+## explicit; go 1.21
google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7
+# google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28
## explicit; go 1.21
google.golang.org/genproto/googleapis/api/expr/v1alpha1
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28
## explicit; go 1.21
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.65.0
-## explicit; go 1.21
+# google.golang.org/grpc v1.68.1
+## explicit; go 1.22
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
@@ -1103,6 +1096,8 @@ google.golang.org/grpc/balancer
google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/grpclb/state
google.golang.org/grpc/balancer/pickfirst
+google.golang.org/grpc/balancer/pickfirst/internal
+google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/channelz
@@ -1113,7 +1108,9 @@ google.golang.org/grpc/credentials/insecure
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/gzip
google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental/stats
google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
google.golang.org/grpc/health/grpc_health_v1
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
@@ -1136,11 +1133,13 @@ google.golang.org/grpc/internal/resolver/dns/internal
google.golang.org/grpc/internal/resolver/passthrough
google.golang.org/grpc/internal/resolver/unix
google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/stats
google.golang.org/grpc/internal/status
google.golang.org/grpc/internal/syscall
google.golang.org/grpc/internal/transport
google.golang.org/grpc/internal/transport/networktype
google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
google.golang.org/grpc/metadata
google.golang.org/grpc/peer
google.golang.org/grpc/resolver
@@ -1210,9 +1209,6 @@ gopkg.in/evanphx/json-patch.v4
# gopkg.in/inf.v0 v0.9.1
## explicit
gopkg.in/inf.v0
-# gopkg.in/ini.v1 v1.67.0
-## explicit
-gopkg.in/ini.v1
# gopkg.in/warnings.v0 v0.1.2
## explicit
gopkg.in/warnings.v0
@@ -1222,7 +1218,7 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# helm.sh/helm/v3 v3.15.4
+# helm.sh/helm/v3 v3.16.3
## explicit; go 1.22.0
helm.sh/helm/v3/internal/fileutil
helm.sh/helm/v3/internal/resolver
@@ -1435,7 +1431,7 @@ k8s.io/apiserver/pkg/util/feature
k8s.io/apiserver/pkg/util/webhook
k8s.io/apiserver/pkg/util/x509metrics
k8s.io/apiserver/pkg/warning
-# k8s.io/cli-runtime v0.30.5
+# k8s.io/cli-runtime v0.31.7
## explicit; go 1.22.0
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/genericiooptions
@@ -1650,7 +1646,7 @@ k8s.io/kube-openapi/pkg/validation/spec
k8s.io/kube-openapi/pkg/validation/strfmt
k8s.io/kube-openapi/pkg/validation/strfmt/bson
k8s.io/kube-openapi/pkg/validation/validate
-# k8s.io/kubectl v0.30.5
+# k8s.io/kubectl v0.31.7
## explicit; go 1.22.0
k8s.io/kubectl/pkg/cmd/util
k8s.io/kubectl/pkg/scheme
@@ -1697,7 +1693,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
-# sigs.k8s.io/controller-runtime v0.18.5
+# sigs.k8s.io/controller-runtime v0.19.7
## explicit; go 1.22.0
sigs.k8s.io/controller-runtime/pkg/client
sigs.k8s.io/controller-runtime/pkg/client/apiutil
@@ -1734,7 +1730,7 @@ sigs.k8s.io/controller-tools/pkg/webhook
## explicit; go 1.21
sigs.k8s.io/json
sigs.k8s.io/json/internal/golang/encoding/json
-# sigs.k8s.io/kubebuilder/v4 v4.1.1
+# sigs.k8s.io/kubebuilder/v4 v4.2.0
## explicit; go 1.22
sigs.k8s.io/kubebuilder/v4/pkg/cli
sigs.k8s.io/kubebuilder/v4/pkg/cli/alpha
@@ -1757,6 +1753,7 @@ sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/te
sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/crd/patches
sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/kdefault
sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/manager
+sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy
sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/prometheus
sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/rbac
sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/samples
diff --git a/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
index 3c0206bea..1d4ce264c 100644
--- a/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
+++ b/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
@@ -72,7 +72,10 @@ func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper m
// IsGVKNamespaced returns true if the object having the provided
// GVK is namespace scoped.
func IsGVKNamespaced(gvk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) {
- restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind})
+ // Fetch the RESTMapping using the complete GVK. If we exclude the Version, the Version set
+ // will be populated using the cached Group if available. This can lead to failures updating
+ // the cache with new Versions of CRDs registered at runtime.
+ restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version)
if err != nil {
return false, fmt.Errorf("failed to get restmapping: %w", err)
}
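The new comment documents the bug this one-argument change fixes: calling RESTMapping without a version lets the mapper satisfy the lookup from whichever version of the group it has cached, so a CRD version registered at runtime can be missed. A sketch of the corrected call shape, assuming an already-constructed meta.RESTMapper and a made-up CRD GVK:

package sketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// isGVKNamespaced mirrors the fixed lookup: passing gvk.Version pins the
// mapping to the version actually requested instead of a cached fallback.
func isGVKNamespaced(restmapper meta.RESTMapper, gvk schema.GroupVersionKind) (bool, error) {
	mapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version)
	if err != nil {
		return false, fmt.Errorf("failed to get restmapping: %w", err)
	}
	return mapping.Scope.Name() == meta.RESTScopeNameNamespace, nil
}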
diff --git a/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
index 451f7b2a1..fe9862b81 100644
--- a/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
+++ b/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
@@ -50,28 +50,10 @@ type Options struct {
// Cache, if provided, is used to read objects from the cache.
Cache *CacheOptions
- // WarningHandler is used to configure the warning handler responsible for
- // surfacing and handling warnings messages sent by the API server.
- WarningHandler WarningHandlerOptions
-
// DryRun instructs the client to only perform dry run requests.
DryRun *bool
}
-// WarningHandlerOptions are options for configuring a
-// warning handler for the client which is responsible
-// for surfacing API Server warnings.
-type WarningHandlerOptions struct {
- // SuppressWarnings decides if the warnings from the
- // API server are suppressed or surfaced in the client.
- SuppressWarnings bool
- // AllowDuplicateLogs does not deduplicate the to-be
- // logged surfaced warnings messages. See
- // log.WarningHandlerOptions for considerations
- // regarding deduplication
- AllowDuplicateLogs bool
-}
-
// CacheOptions are options for creating a cache-backed client.
type CacheOptions struct {
// Reader is a cache-backed reader that will be used to read objects from the cache.
@@ -91,6 +73,12 @@ type NewClientFunc func(config *rest.Config, options Options) (Client, error)
// New returns a new Client using the provided config and Options.
//
+// By default, the client surfaces warnings returned by the server. To
+// suppress warnings, set config.WarningHandler = rest.NoWarnings{}. To
+// define custom behavior, implement the rest.WarningHandler interface.
+// See [sigs.k8s.io/controller-runtime/pkg/log.KubeAPIWarningLogger] for
+// an example.
+//
// The client's read behavior is determined by Options.Cache.
// If either Options.Cache or Options.Cache.Reader is nil,
// the client reads directly from the API server.
@@ -124,15 +112,14 @@ func newClient(config *rest.Config, options Options) (*client, error) {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
- // By default, we de-duplicate and surface warnings.
- config.WarningHandler = log.NewKubeAPIWarningLogger(
- log.Log.WithName("KubeAPIWarningLogger"),
- log.KubeAPIWarningLoggerOptions{
- Deduplicate: !options.WarningHandler.AllowDuplicateLogs,
- },
- )
- if options.WarningHandler.SuppressWarnings {
- config.WarningHandler = rest.NoWarnings{}
+ if config.WarningHandler == nil {
+ // By default, we de-duplicate and surface warnings.
+ config.WarningHandler = log.NewKubeAPIWarningLogger(
+ log.Log.WithName("KubeAPIWarningLogger"),
+ log.KubeAPIWarningLoggerOptions{
+ Deduplicate: true,
+ },
+ )
}
// Use the rest HTTP client for the provided config if unset
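With WarningHandlerOptions removed from Options, warning behavior is now configured on the rest.Config itself, as the new doc comment on New describes. A hedged sketch of both configurations:

package main

import (
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	cfg := ctrl.GetConfigOrDie()

	// Default path: WarningHandler is nil, so client.New installs the
	// deduplicating KubeAPIWarningLogger from the hunk above.
	defaultClient, err := client.New(cfg, client.Options{})
	if err != nil {
		panic(err)
	}
	_ = defaultClient

	// To suppress API server warnings entirely, set the handler up front;
	// client.New leaves a non-nil handler untouched.
	cfg.WarningHandler = rest.NoWarnings{}
	quietClient, err := client.New(cfg, client.Options{})
	if err != nil {
		panic(err)
	}
	_ = quietClient
}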
diff --git a/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go b/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go
new file mode 100644
index 000000000..659b3d44c
--- /dev/null
+++ b/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// WithFieldValidation wraps a Client and configures field validation, by
+// default, for all write requests from this client. Users can override field
+// validation for individual write requests.
+func WithFieldValidation(c Client, validation FieldValidation) Client {
+ return &clientWithFieldValidation{
+ validation: validation,
+ client: c,
+ Reader: c,
+ }
+}
+
+type clientWithFieldValidation struct {
+ validation FieldValidation
+ client Client
+ Reader
+}
+
+func (c *clientWithFieldValidation) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+ return c.client.Create(ctx, obj, append([]CreateOption{c.validation}, opts...)...)
+}
+
+func (c *clientWithFieldValidation) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ return c.client.Update(ctx, obj, append([]UpdateOption{c.validation}, opts...)...)
+}
+
+func (c *clientWithFieldValidation) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ return c.client.Patch(ctx, obj, patch, append([]PatchOption{c.validation}, opts...)...)
+}
+
+func (c *clientWithFieldValidation) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+ return c.client.Delete(ctx, obj, opts...)
+}
+
+func (c *clientWithFieldValidation) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+ return c.client.DeleteAllOf(ctx, obj, opts...)
+}
+
+func (c *clientWithFieldValidation) Scheme() *runtime.Scheme { return c.client.Scheme() }
+func (c *clientWithFieldValidation) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() }
+func (c *clientWithFieldValidation) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) {
+ return c.client.GroupVersionKindFor(obj)
+}
+
+func (c *clientWithFieldValidation) IsObjectNamespaced(obj runtime.Object) (bool, error) {
+ return c.client.IsObjectNamespaced(obj)
+}
+
+func (c *clientWithFieldValidation) Status() StatusWriter {
+ return &subresourceClientWithFieldValidation{
+ validation: c.validation,
+ subresourceWriter: c.client.Status(),
+ }
+}
+
+func (c *clientWithFieldValidation) SubResource(subresource string) SubResourceClient {
+ srClient := c.client.SubResource(subresource)
+ return &subresourceClientWithFieldValidation{
+ validation: c.validation,
+ subresourceWriter: srClient,
+ SubResourceReader: srClient,
+ }
+}
+
+type subresourceClientWithFieldValidation struct {
+ validation FieldValidation
+ subresourceWriter SubResourceWriter
+ SubResourceReader
+}
+
+func (c *subresourceClientWithFieldValidation) Create(ctx context.Context, obj Object, subresource Object, opts ...SubResourceCreateOption) error {
+ return c.subresourceWriter.Create(ctx, obj, subresource, append([]SubResourceCreateOption{c.validation}, opts...)...)
+}
+
+func (c *subresourceClientWithFieldValidation) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error {
+ return c.subresourceWriter.Update(ctx, obj, append([]SubResourceUpdateOption{c.validation}, opts...)...)
+}
+
+func (c *subresourceClientWithFieldValidation) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error {
+ return c.subresourceWriter.Patch(ctx, obj, patch, append([]SubResourcePatchOption{c.validation}, opts...)...)
+}
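The wrapper above promotes a per-request option into a client-wide default while delegating reads unchanged. A usage sketch; the ConfigMap and its names are arbitrary:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	base, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}

	// Every Create/Update/Patch issued through strictClient now sends
	// fieldValidation=Strict unless an individual call overrides it.
	strictClient := client.WithFieldValidation(base, client.FieldValidation(metav1.FieldValidationStrict))

	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	if err := strictClient.Create(context.Background(), cm); err != nil {
		panic(err)
	}
}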
diff --git a/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
index 798506f48..db50ed8fe 100644
--- a/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
+++ b/tools/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
@@ -169,6 +169,39 @@ func (f FieldOwner) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) {
opts.FieldManager = string(f)
}
+// FieldValidation configures field validation for the given requests.
+type FieldValidation string
+
+// ApplyToPatch applies this configuration to the given patch options.
+func (f FieldValidation) ApplyToPatch(opts *PatchOptions) {
+ opts.FieldValidation = string(f)
+}
+
+// ApplyToCreate applies this configuration to the given create options.
+func (f FieldValidation) ApplyToCreate(opts *CreateOptions) {
+ opts.FieldValidation = string(f)
+}
+
+// ApplyToUpdate applies this configuration to the given update options.
+func (f FieldValidation) ApplyToUpdate(opts *UpdateOptions) {
+ opts.FieldValidation = string(f)
+}
+
+// ApplyToSubResourcePatch applies this configuration to the given patch options.
+func (f FieldValidation) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) {
+ opts.FieldValidation = string(f)
+}
+
+// ApplyToSubResourceCreate applies this configuration to the given create options.
+func (f FieldValidation) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) {
+ opts.FieldValidation = string(f)
+}
+
+// ApplyToSubResourceUpdate applies this configuration to the given update options.
+func (f FieldValidation) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) {
+ opts.FieldValidation = string(f)
+}
+
// }}}
// {{{ Create Options
@@ -187,6 +220,24 @@ type CreateOptions struct {
// this request. It must be set with server-side apply.
FieldManager string
+ // fieldValidation instructs the server on how to handle
+ // objects in the request (POST/PUT/PATCH) containing unknown
+ // or duplicate fields. Valid values are:
+ // - Ignore: This will ignore any unknown fields that are silently
+ // dropped from the object, and will ignore all but the last duplicate
+ // field that the decoder encounters. This is the default behavior
+ // prior to v1.23.
+ // - Warn: This will send a warning via the standard warning response
+ // header for each unknown field that is dropped from the object, and
+ // for each duplicate field that is encountered. The request will
+ // still succeed if there are no other errors, and will only persist
+ // the last of any duplicate fields. This is the default in v1.23+
+ // - Strict: This will fail the request with a BadRequest error if
+ // any unknown fields would be dropped from the object, or if any
+ // duplicate fields are present. The error returned from the server
+ // will contain all unknown and duplicate fields encountered.
+ FieldValidation string
+
// Raw represents raw CreateOptions, as passed to the API server.
Raw *metav1.CreateOptions
}
@@ -203,6 +254,7 @@ func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions {
o.Raw.DryRun = o.DryRun
o.Raw.FieldManager = o.FieldManager
+ o.Raw.FieldValidation = o.FieldValidation
return o.Raw
}
@@ -223,6 +275,9 @@ func (o *CreateOptions) ApplyToCreate(co *CreateOptions) {
if o.FieldManager != "" {
co.FieldManager = o.FieldManager
}
+ if o.FieldValidation != "" {
+ co.FieldValidation = o.FieldValidation
+ }
if o.Raw != nil {
co.Raw = o.Raw
}
@@ -679,6 +734,24 @@ type UpdateOptions struct {
// this request. It must be set with server-side apply.
FieldManager string
+ // fieldValidation instructs the server on how to handle
+ // objects in the request (POST/PUT/PATCH) containing unknown
+ // or duplicate fields. Valid values are:
+ // - Ignore: This will ignore any unknown fields that are silently
+ // dropped from the object, and will ignore all but the last duplicate
+ // field that the decoder encounters. This is the default behavior
+ // prior to v1.23.
+ // - Warn: This will send a warning via the standard warning response
+ // header for each unknown field that is dropped from the object, and
+ // for each duplicate field that is encountered. The request will
+ // still succeed if there are no other errors, and will only persist
+ // the last of any duplicate fields. This is the default in v1.23+
+ // - Strict: This will fail the request with a BadRequest error if
+ // any unknown fields would be dropped from the object, or if any
+ // duplicate fields are present. The error returned from the server
+ // will contain all unknown and duplicate fields encountered.
+ FieldValidation string
+
// Raw represents raw UpdateOptions, as passed to the API server.
Raw *metav1.UpdateOptions
}
@@ -695,6 +768,7 @@ func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions {
o.Raw.DryRun = o.DryRun
o.Raw.FieldManager = o.FieldManager
+ o.Raw.FieldValidation = o.FieldValidation
return o.Raw
}
@@ -717,6 +791,9 @@ func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) {
if o.FieldManager != "" {
uo.FieldManager = o.FieldManager
}
+ if o.FieldValidation != "" {
+ uo.FieldValidation = o.FieldValidation
+ }
if o.Raw != nil {
uo.Raw = o.Raw
}
@@ -745,6 +822,24 @@ type PatchOptions struct {
// this request. It must be set with server-side apply.
FieldManager string
+ // fieldValidation instructs the server on how to handle
+ // objects in the request (POST/PUT/PATCH) containing unknown
+ // or duplicate fields. Valid values are:
+ // - Ignore: This will ignore any unknown fields that are silently
+ // dropped from the object, and will ignore all but the last duplicate
+ // field that the decoder encounters. This is the default behavior
+ // prior to v1.23.
+ // - Warn: This will send a warning via the standard warning response
+ // header for each unknown field that is dropped from the object, and
+ // for each duplicate field that is encountered. The request will
+ // still succeed if there are no other errors, and will only persist
+ // the last of any duplicate fields. This is the default in v1.23+
+ // - Strict: This will fail the request with a BadRequest error if
+ // any unknown fields would be dropped from the object, or if any
+ // duplicate fields are present. The error returned from the server
+ // will contain all unknown and duplicate fields encountered.
+ FieldValidation string
+
// Raw represents raw PatchOptions, as passed to the API server.
Raw *metav1.PatchOptions
}
@@ -771,6 +866,7 @@ func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions {
o.Raw.DryRun = o.DryRun
o.Raw.Force = o.Force
o.Raw.FieldManager = o.FieldManager
+ o.Raw.FieldValidation = o.FieldValidation
return o.Raw
}
@@ -787,6 +883,9 @@ func (o *PatchOptions) ApplyToPatch(po *PatchOptions) {
if o.FieldManager != "" {
po.FieldManager = o.FieldManager
}
+ if o.FieldValidation != "" {
+ po.FieldValidation = o.FieldValidation
+ }
if o.Raw != nil {
po.Raw = o.Raw
}
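Because each ApplyTo* guard above copies only non-empty values and options are applied in order, a caller-supplied FieldValidation still overrides the wrapper's prepended default. A minimal sketch of that per-request escape hatch:

package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createWithWarn assumes c is a client wrapped by WithFieldValidation(...Strict):
// the wrapper prepends its default, the caller's option is applied afterwards,
// and the non-empty value wins, so this request is sent with Warn.
func createWithWarn(ctx context.Context, c client.Client, obj client.Object) error {
	return c.Create(ctx, obj, client.FieldValidation(metav1.FieldValidationWarn))
}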
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugin/util/util.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugin/util/util.go
index 407e32584..149b239a0 100644
--- a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugin/util/util.go
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugin/util/util.go
@@ -218,60 +218,6 @@ func CommentCode(filename, target, prefix string) error {
return os.WriteFile(filename, out.Bytes(), 0644)
}
-// ImplementWebhooks will mock an webhook data
-func ImplementWebhooks(filename string) error {
- // false positive
- // nolint:gosec
- bs, err := os.ReadFile(filename)
- if err != nil {
- return err
- }
- str := string(bs)
-
- str, err = EnsureExistAndReplace(
- str,
- "import (",
- `import (
- "errors"`)
- if err != nil {
- return err
- }
-
- // implement defaulting webhook logic
- str, err = EnsureExistAndReplace(
- str,
- "// TODO(user): fill in your defaulting logic.",
- `if r.Spec.Count == 0 {
- r.Spec.Count = 5
- }`)
- if err != nil {
- return err
- }
-
- // implement validation webhook logic
- str, err = EnsureExistAndReplace(
- str,
- "// TODO(user): fill in your validation logic upon object creation.",
- `if r.Spec.Count < 0 {
- return nil, errors.New(".spec.count must >= 0")
- }`)
- if err != nil {
- return err
- }
- str, err = EnsureExistAndReplace(
- str,
- "// TODO(user): fill in your validation logic upon object update.",
- `if r.Spec.Count < 0 {
- return nil, errors.New(".spec.count must >= 0")
- }`)
- if err != nil {
- return err
- }
- // false positive
- // nolint:gosec
- return os.WriteFile(filename, []byte(str), 0644)
-}
-
// EnsureExistAndReplace check if the content exists and then do the replace
func EnsureExistAndReplace(input, match, replace string) (string, error) {
if !strings.Contains(input, match) {
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/plugin.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/plugin.go
index c47e8067f..29623e256 100644
--- a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/plugin.go
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/plugin.go
@@ -25,7 +25,7 @@ import (
)
// KustomizeVersion is the kubernetes-sigs/kustomize version to be used in the project
-const KustomizeVersion = "v5.4.2"
+const KustomizeVersion = "v5.4.3"
const pluginName = "kustomize.common." + plugins.DefaultNameQualifier
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/init.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/init.go
index 0d9442787..ccddd7a09 100644
--- a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/init.go
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/init.go
@@ -24,6 +24,7 @@ import (
"sigs.k8s.io/kubebuilder/v4/pkg/plugins"
"sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/kdefault"
"sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/manager"
+ network_policy "sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy"
"sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/prometheus"
"sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/rbac"
)
@@ -79,6 +80,8 @@ func (s *initScaffolder) Scaffold() error {
&kdefault.ManagerMetricsPatch{},
&manager.Config{Image: imageName},
&kdefault.Kustomization{},
+ &network_policy.Kustomization{},
+ &network_policy.NetworkPolicyAllowMetrics{},
&prometheus.Kustomization{},
&prometheus.Monitor{},
}
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/kdefault/kustomization.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/kdefault/kustomization.go
index 7c9f9ec84..8f61d3455 100644
--- a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/kdefault/kustomization.go
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/kdefault/kustomization.go
@@ -72,6 +72,11 @@ resources:
#- ../prometheus
# [METRICS] Expose the controller manager metrics service.
- metrics_service.yaml
+# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy.
+# Only Pod(s) running in a namespace labeled with 'metrics: enabled' will be able to gather the metrics.
+# Only CR(s) that require webhooks and are applied in namespaces labeled with 'webhook: enabled' will
+# be able to communicate with the Webhook Server.
+#- ../network-policy
# Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager
patches:
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy/allow-metrics-traffic.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy/allow-metrics-traffic.go
new file mode 100644
index 000000000..e9aa320f2
--- /dev/null
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy/allow-metrics-traffic.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package network_policy
+
+import (
+ "path/filepath"
+
+ "sigs.k8s.io/kubebuilder/v4/pkg/machinery"
+)
+
+var _ machinery.Template = &NetworkPolicyAllowMetrics{}
+
+// NetworkPolicyAllowMetrics scaffolds a file that defines the NetworkPolicy
+// to allow access to the metrics endpoint
+type NetworkPolicyAllowMetrics struct {
+ machinery.TemplateMixin
+ machinery.ProjectNameMixin
+}
+
+// SetTemplateDefaults implements file.Template
+func (f *NetworkPolicyAllowMetrics) SetTemplateDefaults() error {
+ if f.Path == "" {
+ f.Path = filepath.Join("config", "network-policy", "allow-metrics-traffic.yaml")
+ }
+
+ f.TemplateBody = metricsNetworkPolicyTemplate
+
+ return nil
+}
+
+const metricsNetworkPolicyTemplate = `# This NetworkPolicy allows ingress traffic
+# from Pods running in namespaces labeled with 'metrics: enabled'. Only Pods in those
+# namespaces are able to gather data from the metrics endpoint.
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ labels:
+ app.kubernetes.io/name: {{ .ProjectName }}
+ app.kubernetes.io/managed-by: kustomize
+ name: allow-metrics-traffic
+ namespace: system
+spec:
+ podSelector:
+ matchLabels:
+ control-plane: controller-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ # This allows ingress traffic from any namespace with the label metrics: enabled
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ metrics: enabled # Only from namespaces with this label
+ ports:
+ - port: 8443
+ protocol: TCP
+`
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy/allow-webhook-traffic.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy/allow-webhook-traffic.go
new file mode 100644
index 000000000..7203ef092
--- /dev/null
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy/allow-webhook-traffic.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package network_policy
+
+import (
+ "path/filepath"
+
+ "sigs.k8s.io/kubebuilder/v4/pkg/machinery"
+)
+
+var _ machinery.Template = &NetworkPolicyAllowWebhooks{}
+
+// NetworkPolicyAllowWebhooks scaffolds a file that defines the NetworkPolicy
+// that allows traffic to reach the webhook server
+type NetworkPolicyAllowWebhooks struct {
+ machinery.TemplateMixin
+ machinery.ProjectNameMixin
+}
+
+// SetTemplateDefaults implements machinery.Template
+func (f *NetworkPolicyAllowWebhooks) SetTemplateDefaults() error {
+ if f.Path == "" {
+ f.Path = filepath.Join("config", "network-policy", "allow-webhook-traffic.yaml")
+ }
+
+ f.TemplateBody = webhooksNetworkPolicyTemplate
+
+ return nil
+}
+
+const webhooksNetworkPolicyTemplate = `# This NetworkPolicy allows ingress traffic to your webhook server running
+# as part of the controller-manager from specific namespaces and pods. CR(s) that use webhooks
+# will only work when applied in namespaces labeled with 'webhook: enabled'
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ labels:
+ app.kubernetes.io/name: {{ .ProjectName }}
+ app.kubernetes.io/managed-by: kustomize
+ name: allow-webhook-traffic
+ namespace: system
+spec:
+ podSelector:
+ matchLabels:
+ control-plane: controller-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ # This allows ingress traffic from any namespace with the label webhook: enabled
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ webhook: enabled # Only from namespaces with this label
+ ports:
+ - port: 443
+ protocol: TCP
+`
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy/kustomization.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy/kustomization.go
new file mode 100644
index 000000000..f90268b85
--- /dev/null
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy/kustomization.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package network_policy
+
+import (
+ "path/filepath"
+
+ "sigs.k8s.io/kubebuilder/v4/pkg/machinery"
+)
+
+var _ machinery.Template = &Kustomization{}
+
+// Kustomization scaffolds a file that defines the kustomization scheme for the network-policy folder
+type Kustomization struct {
+ machinery.TemplateMixin
+}
+
+// SetTemplateDefaults implements machinery.Template
+func (f *Kustomization) SetTemplateDefaults() error {
+ if f.Path == "" {
+ f.Path = filepath.Join("config", "network-policy", "kustomization.yaml")
+ }
+
+ f.TemplateBody = kustomizationTemplate
+
+ return nil
+}
+
+const kustomizationTemplate = `resources:
+- allow-metrics-traffic.yaml
+`
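Note that the scaffolded kustomization.yaml starts out listing only the metrics policy; when a webhook is scaffolded, the webhook.go change below inserts its entry after the `resources:` anchor. Assuming pluginutil inserts the fragment immediately after that anchor (see the sketch following the next hunk), the file would end up reading as in this hypothetical Go raw string:

// Expected content of config/network-policy/kustomization.yaml once the
// webhook scaffolder below has run; the ordering follows from inserting
// the fragment directly after the `resources:` line.
const wantNetworkPolicyKustomization = `resources:
- allow-webhook-traffic.yaml
- allow-metrics-traffic.yaml
`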
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/webhook.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/webhook.go
index bdb2b00b8..827dd7248 100644
--- a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/webhook.go
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/webhook.go
@@ -30,6 +30,7 @@ import (
"sigs.k8s.io/kubebuilder/v4/pkg/plugins"
"sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/certmanager"
"sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/kdefault"
+ network_policy "sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/network-policy"
"sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/webhook"
)
@@ -83,13 +84,22 @@ func (s *webhookScaffolder) Scaffold() error {
&certmanager.KustomizeConfig{},
&patches.EnableWebhookPatch{},
&patches.EnableCAInjectionPatch{},
+ &network_policy.NetworkPolicyAllowWebhooks{},
&crd.Kustomization{},
); err != nil {
return fmt.Errorf("error scaffolding kustomize webhook manifests: %v", err)
}
+ policyKustomizeFilePath := "config/network-policy/kustomization.yaml"
+ err := pluginutil.InsertCodeIfNotExist(policyKustomizeFilePath,
+ "resources:", allowWebhookTrafficFragment)
+ if err != nil {
+ log.Errorf("Unable to add the line '- allow-webhook-traffic.yaml' to the file "+
+ "%s to allow webhook traffic.", policyKustomizeFilePath)
+ }
+
kustomizeFilePath := "config/default/kustomization.yaml"
- err := pluginutil.UncommentCode(kustomizeFilePath, "#- ../webhook", `#`)
+ err = pluginutil.UncommentCode(kustomizeFilePath, "#- ../webhook", `#`)
if err != nil {
hasWebHookUncommented, err := pluginutil.HasFragment(kustomizeFilePath, "- ../webhook")
if !hasWebHookUncommented || err != nil {
@@ -137,3 +147,6 @@ func (s *webhookScaffolder) Scaffold() error {
return nil
}
+
+const allowWebhookTrafficFragment = `
+- allow-webhook-traffic.yaml`
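The fragment insertion in this hunk is best-effort: a failure is only logged via log.Errorf and scaffolding continues. A simplified sketch of the semantics assumed of pluginutil.InsertCodeIfNotExist — insert code right after target, but only when it is not already present, so re-running the scaffold stays idempotent (illustrative only, not the vendored implementation):

package main

import (
	"fmt"
	"os"
	"strings"
)

// insertCodeIfNotExist inserts code immediately after target in filename,
// skipping the write when code is already present.
func insertCodeIfNotExist(filename, target, code string) error {
	raw, err := os.ReadFile(filename)
	if err != nil {
		return err
	}
	contents := string(raw)
	if strings.Contains(contents, code) {
		return nil // already inserted on a previous run
	}
	idx := strings.Index(contents, target)
	if idx < 0 {
		return fmt.Errorf("target %q not found in %s", target, filename)
	}
	cut := idx + len(target)
	return os.WriteFile(filename, []byte(contents[:cut]+code+contents[cut:]), 0600)
}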
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/init.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/init.go
index 847e699e2..9700a606c 100644
--- a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/init.go
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/init.go
@@ -35,11 +35,11 @@ import (
const (
// ControllerRuntimeVersion is the kubernetes-sigs/controller-runtime version to be used in the project
- ControllerRuntimeVersion = "v0.18.4"
+ ControllerRuntimeVersion = "v0.19.0"
// ControllerToolsVersion is the kubernetes-sigs/controller-tools version to be used in the project
- ControllerToolsVersion = "v0.15.0"
+ ControllerToolsVersion = "v0.16.1"
// EnvtestK8SVersion is the k8s version used to do the scaffold
- EnvtestK8SVersion = "1.30.0"
+ EnvtestK8SVersion = "1.31.0"
imageName = "controller:latest"
)
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/api/webhook_suitetest.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/api/webhook_suitetest.go
index ced8d7ae5..9c8258665 100644
--- a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/api/webhook_suitetest.go
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/api/webhook_suitetest.go
@@ -255,8 +255,8 @@ var _ = BeforeSuite(func() {
})
var _ = AfterSuite(func() {
- cancel()
By("tearing down the test environment")
+ cancel()
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/controllers/controller_suitetest.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/controllers/controller_suitetest.go
index cafb287a0..e2f8796bc 100644
--- a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/controllers/controller_suitetest.go
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/controllers/controller_suitetest.go
@@ -159,6 +159,8 @@ import (
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
+var ctx context.Context
+var cancel context.CancelFunc
func TestControllers(t *testing.T) {
RegisterFailHandler(Fail)
@@ -169,6 +171,8 @@ func TestControllers(t *testing.T) {
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+ ctx, cancel = context.WithCancel(context.TODO())
+
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join({{ .CRDDirectoryRelativePath }}, "config", "crd", "bases")},
@@ -199,6 +203,7 @@ var _ = BeforeSuite(func() {
var _ = AfterSuite(func() {
By("tearing down the test environment")
+ cancel()
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
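The ordering change in these suite templates matters: cancel() now runs before testEnv.Stop(), so anything started under ctx (typically the manager, in suites that run one) shuts down before the envtest control plane is torn down; stopping the control plane first can leave clients retrying against a dead API server and make Stop hang. A condensed sketch of the scaffolded lifecycle — the empty Environment and the manager comment are assumptions; generated suites also configure CRD paths and binary assets:

package controller_test

import (
	"context"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

var (
	ctx     context.Context
	cancel  context.CancelFunc
	testEnv *envtest.Environment
)

func TestControllers(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
	ctx, cancel = context.WithCancel(context.TODO())
	testEnv = &envtest.Environment{} // real suites set CRDDirectoryPaths etc.
	cfg, err := testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	_ = cfg // a manager built from cfg would be started with ctx here
})

var _ = AfterSuite(func() {
	By("tearing down the test environment")
	cancel() // stop everything running under ctx first
	Expect(testEnv.Stop()).NotTo(HaveOccurred())
})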
diff --git a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/makefile.go b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/makefile.go
index 45ff7cee7..b1d9e7d11 100644
--- a/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/makefile.go
+++ b/tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/makefile.go
@@ -74,7 +74,7 @@ func (f *Makefile) SetTemplateDefaults() error {
const makefileTemplate = `# Image URL to use all building/pushing image targets
IMG ?= {{ .Image }}
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
-ENVTEST_K8S_VERSION = 1.30.0
+ENVTEST_K8S_VERSION = 1.31.0
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))