Skip to content

Commit 34a06d0

Browse files
committed
adding examples
1 parent 4de3941 commit 34a06d0

12 files changed

+696
-33
lines changed

consumer.Dockerfile

+26
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,26 @@
1+
FROM golang:1.22-alpine AS builder
2+
3+
ENV PATH="/go/bin:${PATH}"
4+
ENV GO111MODULE=on
5+
ENV CGO_ENABLED=1
6+
ENV GOOS=linux
7+
ENV GOARCH=arm64
8+
9+
WORKDIR /go/src
10+
11+
COPY . .
12+
RUN go mod download
13+
14+
RUN apk -U add ca-certificates
15+
RUN apk update && apk upgrade && apk add pkgconf git bash build-base sudo
16+
RUN apk update && apk add librdkafka-dev
17+
18+
RUN go build -tags musl --ldflags "-extldflags -static" -o consumer ./example/consumer
19+
20+
FROM scratch AS runner
21+
22+
COPY --from=builder /go/src/consumer /
23+
24+
EXPOSE 8080
25+
26+
ENTRYPOINT ["./consumer"]

consumer.go

+18-13
Original file line number | Diff line number | Diff line change
@@ -22,6 +22,7 @@ func NewConsumer(conf *kafka.ConfigMap, opts ...Option) (*Consumer, error) {
2222
if err != nil {
2323
return nil, err
2424
}
25+
opts = append(opts, withConfig(conf))
2526
cfg := newConfig(opts...)
2627
return &Consumer{Consumer: c, cfg: cfg}, nil
2728
}
@@ -35,30 +36,34 @@ func WrapConsumer(c *kafka.Consumer, opts ...Option) *Consumer {
3536
return wrapped
3637
}
3738

38-
func (c *Consumer) Pool(timeout int) kafka.Event {
39+
func (c *Consumer) Poll(timeoutMs int) (event kafka.Event) {
3940
if c.prev != nil {
4041
c.prev.End()
4142
}
42-
event := c.Consumer.Poll(timeout)
43-
switch e := event.(type) {
43+
e := c.Consumer.Poll(timeoutMs)
44+
switch e := e.(type) {
4445
case *kafka.Message:
4546
span := c.startSpan(e)
47+
// latest span is stored to be closed when the next message is polled or when the consumer is closed
4648
c.prev = span
4749
}
4850

49-
return event
51+
return e
5052
}
5153

5254
// ReadMessage polls the consumer for a message. Message will be traced.
5355
func (c *Consumer) ReadMessage(timeout time.Duration) (*kafka.Message, error) {
5456
if c.prev != nil {
55-
c.prev.End()
57+
if c.prev.IsRecording() {
58+
c.prev.End()
59+
}
5660
c.prev = nil
5761
}
5862
msg, err := c.Consumer.ReadMessage(timeout)
5963
if err != nil {
6064
return nil, err
6165
}
66+
// latest span is stored to be closed when the next message is polled or when the consumer is closed
6267
c.prev = c.startSpan(msg)
6368
return msg, nil
6469
}
@@ -71,7 +76,9 @@ func (c *Consumer) Close() error {
7176
// not enabled, because otherwise there would be a data race from the
7277
// consuming goroutine.
7378
if c.prev != nil {
74-
c.prev.End()
79+
if c.prev.IsRecording() {
80+
c.prev.End()
81+
}
7582
c.prev = nil
7683
}
7784
return err
@@ -95,6 +102,11 @@ func (c *Consumer) startSpan(msg *kafka.Message) trace.Span {
95102
semconv.MessagingDestinationPartitionID(strconv.Itoa(int(msg.TopicPartition.Partition))),
96103
semconv.MessagingMessageBodySize(getMsgSize(msg)),
97104
}
105+
106+
if c.cfg.attributeInjectFunc != nil {
107+
attrs = append(attrs, c.cfg.attributeInjectFunc(msg)...)
108+
}
109+
98110
opts := []trace.SpanStartOption{
99111
trace.WithAttributes(attrs...),
100112
trace.WithSpanKind(trace.SpanKindConsumer),
@@ -105,10 +117,3 @@ func (c *Consumer) startSpan(msg *kafka.Message) trace.Span {
105117
c.cfg.Propagators.Inject(newCtx, carrier)
106118
return span
107119
}
108-
109-
func getMsgSize(msg *kafka.Message) (size int) {
110-
for _, header := range msg.Headers {
111-
size += len(header.Key) + len(header.Value)
112-
}
113-
return size + len(msg.Value) + len(msg.Key)
114-
}

docker-compose.yml

+74
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,74 @@
1+
services:
2+
broker:
3+
image: apache/kafka-native:3.8.0
4+
hostname: broker
5+
container_name: broker
6+
ports:
7+
- 9092:9092
8+
environment:
9+
KAFKA_BROKER_ID: 1
10+
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,CONTROLLER:PLAINTEXT
11+
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
12+
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
13+
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
14+
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
15+
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
16+
KAFKA_PROCESS_ROLES: broker,controller
17+
KAFKA_NODE_ID: 1
18+
KAFKA_CONTROLLER_QUORUM_VOTERS: 1@broker:29093
19+
KAFKA_LISTENERS: PLAINTEXT://broker:29092,CONTROLLER://broker:29093,PLAINTEXT_HOST://0.0.0.0:9092
20+
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
21+
KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
22+
KAFKA_LOG_DIRS: /tmp/kraft-combined-logs
23+
CLUSTER_ID: MkU3OEVBNTcwNTJENDM2Qk
24+
25+
kafka-init-topics:
26+
image: confluentinc/cp-kafka:7.2.1
27+
depends_on:
28+
- broker
29+
command: "bash -c 'echo Waiting for Kafka to be ready... && \
30+
cub kafka-ready -b broker:29092 1 30 && \
31+
kafka-topics --create --topic test-topic --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server broker:29092'"
32+
kafka-ui:
33+
image: provectuslabs/kafka-ui
34+
container_name: kafka-ui
35+
ports:
36+
- "8089:8080"
37+
restart: always
38+
environment:
39+
- KAFKA_CLUSTERS_0_NAME=local
40+
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=broker:29092
41+
42+
consumer:
43+
build:
44+
context: .
45+
dockerfile: consumer.Dockerfile
46+
environment:
47+
KAFKA_SERVER: broker:29092
48+
KAFKA_TOPIC: test-topic
49+
50+
producer:
51+
build:
52+
context: .
53+
dockerfile: producer.Dockerfile
54+
environment:
55+
KAFKA_SERVER: broker:29092
56+
KAFKA_TOPIC: test-topic
57+
58+
jaeger:
59+
image: jaegertracing/all-in-one:1.60
60+
container_name: jaeger
61+
environment:
62+
- COLLECTOR_ZIPKIN_HOST_PORT=:9411
63+
ports:
64+
- "6831:6831/udp"
65+
- "6832:6832/udp"
66+
- "5778:5778"
67+
- "16686:16686"
68+
- "4317:4317"
69+
- "4318:4318"
70+
- "14250:14250"
71+
- "14268:14268"
72+
- "14269:14269"
73+
- "9411:9411"
74+
restart: always

exmaple/consumer/main.go renamed to example/consumer/main.go

+28-10
Original file line number | Diff line number | Diff line change
@@ -1,30 +1,46 @@
1-
package consumer
1+
package main
22

3-
// create a main function and use confluent-kafka-go to subscribe to the topic and consume messages
43
import (
4+
"context"
55
"fmt"
66
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
7+
"log"
78
"opentelemetry.io/contrib/instrumentation/github.com/confluentinc/confluent-kafka-go/v2/otelkafka"
9+
"opentelemetry.io/contrib/instrumentation/github.com/confluentinc/confluent-kafka-go/v2/otelkafka/example"
810
"os"
11+
"os/signal"
912
)
1013

1114
func main() {
12-
// create a new consumer instance
13-
consumer, err := kafka.NewConsumer(&kafka.ConfigMap{
14-
"bootstrap.servers": "localhost:9092",
15+
signals := make(chan os.Signal, 1)
16+
signal.Notify(signals, os.Interrupt)
17+
18+
topic := os.Getenv("KAFKA_TOPIC")
19+
kafkaServers := os.Getenv("KAFKA_SERVER")
20+
21+
tp, err := example.InitTracer("consumer-app")
22+
if err != nil {
23+
log.Fatal(err)
24+
}
25+
defer func() {
26+
if err := tp.Shutdown(context.Background()); err != nil {
27+
log.Printf("Error shutting down tracer provider: %v", err)
28+
}
29+
}()
30+
31+
consumer, err := otelkafka.NewConsumer(&kafka.ConfigMap{
32+
"bootstrap.servers": kafkaServers,
1533
"group.id": "myGroup",
1634
"auto.offset.reset": "earliest",
1735
})
36+
1837
if err != nil {
1938
fmt.Fprintf(os.Stderr, "Failed to create consumer: %s\n", err)
2039
os.Exit(1)
2140
}
22-
fmt.Println("Created Consumer")
23-
24-
otelConsumer := otelkafka.NewConsumer(consumer)
2541

2642
// subscribe to the topic
27-
err = otelConsumer.SubscribeTopics([]string{"myTopic"}, nil)
43+
err = consumer.SubscribeTopics([]string{topic}, nil)
2844
if err != nil {
2945
fmt.Fprintf(os.Stderr, "Failed to subscribe to topic: %s\n", err)
3046
os.Exit(1)
@@ -39,15 +55,17 @@ func main() {
3955
fmt.Printf("Caught signal %v: terminating\n", sig)
4056
run = false
4157
default:
42-
ev := otelConsumer.Poll(100)
58+
ev := consumer.Poll(100)
4359
if ev == nil {
4460
continue
4561
}
4662

4763
switch e := ev.(type) {
4864
case *kafka.Message:
65+
4966
fmt.Printf("%% Message on %s:\n%s\n",
5067
e.TopicPartition, string(e.Value))
68+
5169
case kafka.Error:
5270
// Errors should generally be considered as informational, the client will try to automatically recover
5371
fmt.Fprintf(os.Stderr, "%% Error: %v\n", e)

example/producer/main.go

+68
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,68 @@
1+
package main
2+
3+
import (
4+
"context"
5+
"fmt"
6+
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
7+
"go.opentelemetry.io/otel"
8+
"log"
9+
"opentelemetry.io/contrib/instrumentation/github.com/confluentinc/confluent-kafka-go/v2/otelkafka"
10+
"opentelemetry.io/contrib/instrumentation/github.com/confluentinc/confluent-kafka-go/v2/otelkafka/example"
11+
"os"
12+
)
13+
14+
func main() {
15+
tp, err := example.InitTracer("producer-app")
16+
if err != nil {
17+
log.Fatal(err)
18+
}
19+
defer func() {
20+
if err := tp.Shutdown(context.Background()); err != nil {
21+
log.Printf("Error shutting down tracer provider: %v", err)
22+
}
23+
}()
24+
25+
bootstrapServers := os.Getenv("KAFKA_SERVER")
26+
topic := os.Getenv("KAFKA_TOPIC")
27+
28+
p, err := otelkafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers})
29+
30+
if err != nil {
31+
fmt.Printf("Failed to create producer: %s\n", err)
32+
os.Exit(1)
33+
}
34+
fmt.Printf("Created Producer %v\n", p)
35+
36+
tr := otel.Tracer("produce")
37+
ctx, span := tr.Start(context.Background(), "produce message")
38+
defer span.End()
39+
40+
// Optional delivery channel, if not specified the Producer object's
41+
// .Events channel is used.
42+
deliveryChan := make(chan kafka.Event)
43+
message := &kafka.Message{
44+
TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
45+
Value: []byte("Hello Go!"),
46+
Key: []byte("message-key"),
47+
Headers: []kafka.Header{{Key: "myTestHeader", Value: []byte("header values are binary")}},
48+
}
49+
otel.GetTextMapPropagator().Inject(ctx, otelkafka.NewMessageCarrier(message))
50+
51+
err = p.Produce(message, deliveryChan)
52+
if err != nil {
53+
fmt.Printf("Produce failed: %v\n", err)
54+
os.Exit(1)
55+
}
56+
57+
e := <-deliveryChan
58+
m := e.(*kafka.Message)
59+
60+
if m.TopicPartition.Error != nil {
61+
fmt.Printf("Delivery failed: %v\n", m.TopicPartition.Error)
62+
} else {
63+
fmt.Printf("Delivered message to topic %s [%d] at offset %v\n",
64+
*m.TopicPartition.Topic, m.TopicPartition.Partition, m.TopicPartition.Offset)
65+
}
66+
67+
close(deliveryChan)
68+
}

example/tracer.go

+36
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,36 @@
1+
package example
2+
3+
import (
4+
"context"
5+
"go.opentelemetry.io/otel"
6+
"go.opentelemetry.io/otel/attribute"
7+
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
8+
"go.opentelemetry.io/otel/propagation"
9+
"go.opentelemetry.io/otel/sdk/resource"
10+
sdktrace "go.opentelemetry.io/otel/sdk/trace"
11+
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
12+
)
13+
14+
func InitTracer(serviceName string) (*sdktrace.TracerProvider, error) {
15+
exporter, err := otlptracegrpc.New(
16+
context.Background(),
17+
otlptracegrpc.WithEndpoint("jaeger:4317"),
18+
otlptracegrpc.WithInsecure(),
19+
)
20+
if err != nil {
21+
return nil, err
22+
}
23+
24+
attrs := []attribute.KeyValue{
25+
semconv.ServiceName(serviceName),
26+
}
27+
28+
tp := sdktrace.NewTracerProvider(
29+
sdktrace.WithSampler(sdktrace.AlwaysSample()),
30+
sdktrace.WithBatcher(exporter),
31+
sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL, attrs...)),
32+
)
33+
otel.SetTracerProvider(tp)
34+
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}))
35+
return tp, nil
36+
}

go.mod

+20-3
Original file line number | Diff line number | Diff line change
@@ -3,10 +3,27 @@ module opentelemetry.io/contrib/instrumentation/github.com/confluentinc/confluen
33
go 1.22.5
44

55
require (
6-
github.com/confluentinc/confluent-kafka-go/v2 v2.5.0 // indirect
6+
github.com/confluentinc/confluent-kafka-go/v2 v2.5.0
7+
go.opentelemetry.io/otel v1.28.0
8+
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
9+
go.opentelemetry.io/otel/sdk v1.28.0
10+
go.opentelemetry.io/otel/trace v1.28.0
11+
)
12+
13+
require (
14+
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
715
github.com/go-logr/logr v1.4.2 // indirect
816
github.com/go-logr/stdr v1.2.2 // indirect
9-
go.opentelemetry.io/otel v1.28.0 // indirect
17+
github.com/google/uuid v1.6.0 // indirect
18+
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
19+
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
1020
go.opentelemetry.io/otel/metric v1.28.0 // indirect
11-
go.opentelemetry.io/otel/trace v1.28.0 // indirect
21+
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
22+
golang.org/x/net v0.26.0 // indirect
23+
golang.org/x/sys v0.21.0 // indirect
24+
golang.org/x/text v0.16.0 // indirect
25+
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
26+
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
27+
google.golang.org/grpc v1.64.0 // indirect
28+
google.golang.org/protobuf v1.34.2 // indirect
1229
)

0 commit comments

Comments (0)