LangWatch supports tracing local models served by Ollama through Ollama's OpenAI-compatible endpoint.

Installation

go get github.com/langwatch/langwatch/sdk-go github.com/openai/openai-go
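You'll also need Ollama installed and running locally with at least one model pulled. The model name below is just an example; any model from the Ollama library works:

ollama pull llama3.2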

Usage

Set the LANGWATCH_API_KEY environment variable before running. Ollama runs locally, so no real API key is needed for the model itself.
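For example (http://localhost:11434/v1 is Ollama's default OpenAI-compatible endpoint; adjust it if you run Ollama on a different port):

export LANGWATCH_API_KEY="your-langwatch-api-key"
export OLLAMA_BASE_URL="http://localhost:11434/v1"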
package main

import (
	"context"
	"log"
	"os"

	langwatch "github.com/langwatch/langwatch/sdk-go"
	otelopenai "github.com/langwatch/langwatch/sdk-go/instrumentation/openai"
	"github.com/openai/openai-go"
	oaioption "github.com/openai/openai-go/option"
	"go.opentelemetry.io/otel"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// Set up LangWatch exporter
	exporter, err := langwatch.NewDefaultExporter(ctx)
	if err != nil {
		log.Fatalf("failed to create exporter: %v", err)
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
	otel.SetTracerProvider(tp)
	defer tp.Shutdown(ctx) // Critical: ensures traces are flushed

	// Create Ollama client via OpenAI-compatible API
	client := openai.NewClient(
		oaioption.WithBaseURL(os.Getenv("OLLAMA_BASE_URL")), // e.g. http://localhost:11434/v1
		oaioption.WithAPIKey("ollama"), // Ollama doesn't require a real key
		oaioption.WithMiddleware(otelopenai.Middleware("my-app",
			otelopenai.WithCaptureInput(),
			otelopenai.WithCaptureOutput(),
			otelopenai.WithGenAISystem("ollama"),
		)),
	)

	response, err := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
		Model: "llama3.2", // use any model you've pulled into Ollama
		Messages: []openai.ChatCompletionMessageParamUnion{
			openai.SystemMessage("You are a helpful assistant."),
			openai.UserMessage("Hello, Ollama!"),
		},
	})
	if err != nil {
		log.Fatalf("Chat completion failed: %v", err)
	}

	log.Printf("Response: %s", response.Choices[0].Message.Content)
}
The defer tp.Shutdown(ctx) call is essential. Without it, traces buffered in memory will be lost when your application exits.
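Streaming requests go through the same instrumented client, so they pass through the middleware as well. Here is a minimal sketch, placed inside main after the first completion, assuming the client from the example above plus an extra "fmt" import; the model name is again just an example:

	stream := client.Chat.Completions.NewStreaming(ctx, openai.ChatCompletionNewParams{
		Model: "llama3.2",
		Messages: []openai.ChatCompletionMessageParamUnion{
			openai.UserMessage("Write a one-line greeting."),
		},
	})
	for stream.Next() {
		chunk := stream.Current()
		if len(chunk.Choices) > 0 {
			fmt.Print(chunk.Choices[0].Delta.Content) // print tokens as they arrive
		}
	}
	if err := stream.Err(); err != nil {
		log.Fatalf("streaming failed: %v", err)
	}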