I have Jaeger running locally in Docker:
services:
  jaeger:
    image: jaegertracing/all-in-one
    ports:
      - "14268:14268"
      - "16686:16686"
Then I connect to it over gRPC, using one of the ports that Docker publishes.
tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

const maxCallMsgSize = 1024 * 1024 * 100

conn, err := grpc.DialContext(
    tctx,
    "localhost:14268",
    grpc.WithTransportCredentials(insecure.NewCredentials()),
    grpc.WithDefaultCallOptions(
        grpc.MaxCallRecvMsgSize(maxCallMsgSize),
        grpc.MaxCallSendMsgSize(maxCallMsgSize),
    ),
)
if err != nil {
    return fmt.Errorf("conn: %w", err)
}

exp, err := otlptracegrpc.New(tctx, otlptracegrpc.WithGRPCConn(conn))
if err != nil {
    return fmt.Errorf("exporter: %w", err)
}

res, err := resource.Merge(
    resource.Default(),
    resource.NewWithAttributes(
        semconv.SchemaURL,
        semconv.ServiceName(serviceName),
    ),
)
if err != nil {
    return fmt.Errorf("resource: %w", err)
}

tp := tracesdk.NewTracerProvider(
    tracesdk.WithSampler(tracesdk.AlwaysSample()),
    tracesdk.WithBatcher(exp),
    tracesdk.WithResource(res),
)
otel.SetTracerProvider(tp)
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))
tracer = otel.Tracer("trader-bot")
But when I try to send a span to Jaeger, I get the following error:
export: context deadline exceeded: rpc error: code = Unavailable desc = connection error: desc = "error reading server preface: http2: frame too large"
What could be wrong here, and how do I fix it?
The problem is the port. On the all-in-one image, 14268 is the collector's HTTP endpoint for jaeger.thrift payloads, not a gRPC endpoint, so your gRPC client speaks HTTP/2 to an HTTP/1.1 server and fails with "error reading server preface: http2: frame too large". The OTLP gRPC receiver listens on port 4317 instead (and on older Jaeger versions it must be enabled with COLLECTOR_OTLP_ENABLED=true). You can try the following example taken from the documentation.
docker run -d --name jaeger \
  -e COLLECTOR_OTLP_ENABLED=true \
  -p 16686:16686 \
  -p 4317:4317 \
  -p 4318:4318 \
  jaegertracing/all-in-one:latest
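With the OTLP receiver published, the only change your original snippet really needs is the dial target. A minimal sketch, reusing the tctx, credentials, and error handling from your code:

// Dial Jaeger's OTLP gRPC receiver on 4317 instead of the
// Thrift-over-HTTP collector port 14268.
conn, err := grpc.DialContext(
    tctx,
    "localhost:4317",
    grpc.WithTransportCredentials(insecure.NewCredentials()),
)
if err != nil {
    return fmt.Errorf("conn: %w", err)
}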
For a full, working setup with the OTLP gRPC exporter, try the otel-collector example:
package main

import (
    "context"
    "fmt"
    "log"
    "os"
    "os/signal"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
    "go.opentelemetry.io/otel/propagation"
    "go.opentelemetry.io/otel/sdk/resource"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
    semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
    "go.opentelemetry.io/otel/trace"
)

// Initializes an OTLP exporter, and configures the corresponding trace and
// metric providers.
func initProvider() (func(context.Context) error, error) {
    ctx := context.Background()

    res, err := resource.New(ctx,
        resource.WithAttributes(
            // The service name used to display traces in backends.
            semconv.ServiceName("test-service"),
        ),
    )
    if err != nil {
        return nil, fmt.Errorf("failed to create resource: %w", err)
    }

    // If the OpenTelemetry Collector is running on a local cluster (minikube or
    // microk8s), it should be accessible through the NodePort service at the
    // `localhost:30080` endpoint. Otherwise, replace `localhost` with the
    // endpoint of your cluster. If you run the app inside k8s, then you can
    // probably connect directly to the service through dns.
    ctx, cancel := context.WithTimeout(ctx, time.Second)
    defer cancel()
    conn, err := grpc.DialContext(ctx, "localhost:4317",
        // Note the use of insecure transport here. TLS is recommended in production.
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithBlock(),
    )
    if err != nil {
        return nil, fmt.Errorf("failed to create gRPC connection to collector: %w", err)
    }

    // Set up a trace exporter.
    traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithGRPCConn(conn))
    if err != nil {
        return nil, fmt.Errorf("failed to create trace exporter: %w", err)
    }

    // Register the trace exporter with a TracerProvider, using a batch
    // span processor to aggregate spans before export.
    bsp := sdktrace.NewBatchSpanProcessor(traceExporter)
    tracerProvider := sdktrace.NewTracerProvider(
        sdktrace.WithSampler(sdktrace.AlwaysSample()),
        sdktrace.WithResource(res),
        sdktrace.WithSpanProcessor(bsp),
    )
    otel.SetTracerProvider(tracerProvider)

    // Set global propagator to tracecontext (the default is no-op).
    otel.SetTextMapPropagator(propagation.TraceContext{})

    // Shutdown will flush any remaining spans and shut down the exporter.
    return tracerProvider.Shutdown, nil
}

func main() {
    log.Printf("Waiting for connection...")

    ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
    defer cancel()

    shutdown, err := initProvider()
    if err != nil {
        log.Fatal(err)
    }
    defer func() {
        if err := shutdown(ctx); err != nil {
            log.Fatalf("failed to shutdown TracerProvider: %v", err)
        }
    }()

    tracer := otel.Tracer("test-tracer")

    // Attributes represent additional key-value descriptors that can be bound
    // to a metric observer or recorder.
    commonAttrs := []attribute.KeyValue{
        attribute.String("attrA", "chocolate"),
        attribute.String("attrB", "raspberry"),
        attribute.String("attrC", "vanilla"),
    }

    // Work begins.
    ctx, span := tracer.Start(
        ctx,
        "CollectorExporter-Example",
        trace.WithAttributes(commonAttrs...))
    defer span.End()
    for i := 0; i < 10; i++ {
        _, iSpan := tracer.Start(ctx, fmt.Sprintf("Sample-%d", i))
        log.Printf("Doing really hard work (%d / 10)\n", i+1)
        <-time.After(time.Second)
        iSpan.End()
    }

    log.Printf("Done!")
}
$ go mod init tmp
...
$ go mod tidy
...
$ go run .
2024/03/11 13:34:43 Waiting for connection...
2024/03/11 13:34:43 Doing really hard work (1 / 10)
2024/03/11 13:34:44 Doing really hard work (2 / 10)
2024/03/11 13:34:45 Doing really hard work (3 / 10)
2024/03/11 13:34:46 Doing really hard work (4 / 10)
2024/03/11 13:34:47 Doing really hard work (5 / 10)
2024/03/11 13:34:48 Doing really hard work (6 / 10)
2024/03/11 13:34:49 Doing really hard work (7 / 10)
2024/03/11 13:34:50 Doing really hard work (8 / 10)
2024/03/11 13:34:51 Doing really hard work (9 / 10)
2024/03/11 13:34:52 Doing really hard work (10 / 10)
2024/03/11 13:34:53 Done!
Finally, you can verify with docker ps that the Jaeger container is running and publishing the OTLP ports (4317/4318); once spans are exported, they appear in the Jaeger UI at http://localhost:16686.
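If you would rather keep your docker compose setup, an equivalent service definition might look like this (a sketch that mirrors the docker run command above; adjust the image tag to your needs):

services:
  jaeger:
    image: jaegertracing/all-in-one:latest
    environment:
      - COLLECTOR_OTLP_ENABLED=true
    ports:
      - "16686:16686" # Jaeger UI
      - "4317:4317"   # OTLP gRPC receiver
      - "4318:4318"   # OTLP HTTP receiver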