From 5396a7c7fadce2b61c6dc31b37fadfe61de1d294 Mon Sep 17 00:00:00 2001 From: Martin Hutchinson Date: Tue, 3 Mar 2026 11:57:09 +0000 Subject: [PATCH 1/6] [GCP] Exploration into using structured logging This replaces klog with slog for GCP-only classes. This is a clear improvement in the GCP case as it only provides more context and clearer and more searchable debug logs, which will be attached to spans where appropriate. If we are happy with this, we can choose to continue replacing all klog with slog throughout. This will require different handlers for different environments. Or maybe we can just use the default handler for the other environments. --- cmd/conformance/gcp/main.go | 40 ++++++++++++++------- cmd/conformance/gcp/otel.go | 15 +++++--- internal/logger/gcp.go | 72 +++++++++++++++++++++++++++++++++++++ storage/gcp/antispam/gcp.go | 10 +++--- storage/gcp/gcp.go | 55 ++++++++++++++-------------- 5 files changed, 144 insertions(+), 48 deletions(-) create mode 100644 internal/logger/gcp.go diff --git a/cmd/conformance/gcp/main.go b/cmd/conformance/gcp/main.go index d5393ba00..0b701dc4b 100644 --- a/cmd/conformance/gcp/main.go +++ b/cmd/conformance/gcp/main.go @@ -24,13 +24,16 @@ import ( "net/http" "time" + "log/slog" + "os" + "github.com/transparency-dev/tessera" + "github.com/transparency-dev/tessera/internal/logger" "github.com/transparency-dev/tessera/storage/gcp" gcp_as "github.com/transparency-dev/tessera/storage/gcp/antispam" "golang.org/x/mod/sumdb/note" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" - "k8s.io/klog/v2" ) var ( @@ -40,6 +43,7 @@ var ( signer = flag.String("signer", "", "Note signer to use to sign checkpoints") persistentAntispam = flag.Bool("antispam", false, "EXPERIMENTAL: Set to true to enable GCP-based persistent antispam storage") traceFraction = flag.Float64("trace_fraction", 0.01, "Fraction of open-telemetry span traces to sample") + projectID = flag.String("project", "", "GCP Project ID for Cloud Logging traces 
(optional)") additionalSigners = []string{} ) @@ -51,10 +55,12 @@ func init() { } func main() { - klog.InitFlags(nil) flag.Parse() ctx := context.Background() + handler := slog.NewJSONHandler(os.Stderr, nil) + slog.SetDefault(slog.New(logger.NewGCPContextHandler(handler, *projectID))) + shutdownOTel := initOTel(ctx, *traceFraction) defer shutdownOTel(ctx) @@ -64,7 +70,8 @@ func main() { gcpCfg := storageConfigFromFlags() driver, err := gcp.New(ctx, gcpCfg) if err != nil { - klog.Exitf("Failed to create new GCP storage: %v", err) + slog.Error("Failed to create new GCP storage", "error", err) + os.Exit(1) } var antispam tessera.Antispam @@ -73,7 +80,8 @@ func main() { asOpts := gcp_as.AntispamOpts{} // Use defaults antispam, err = gcp_as.NewAntispam(ctx, fmt.Sprintf("%s-antispam", *spanner), asOpts) if err != nil { - klog.Exitf("Failed to create new GCP antispam storage: %v", err) + slog.Error("Failed to create new GCP antispam storage", "error", err) + os.Exit(1) } } @@ -84,7 +92,8 @@ func main() { WithPushback(10*4096). WithAntispam(tessera.DefaultAntispamInMemorySize, antispam)) if err != nil { - klog.Exit(err) + slog.Error("Failed to append", "error", err) + os.Exit(1) } // Expose a HTTP handler for the conformance test writes. @@ -121,14 +130,17 @@ func main() { ReadHeaderTimeout: 5 * time.Second, } if err := http2.ConfigureServer(h1s, h2s); err != nil { - klog.Exitf("http2.ConfigureServer: %v", err) + slog.Error("http2.ConfigureServer", "error", err) + os.Exit(1) } if err := h1s.ListenAndServe(); err != nil { if err := shutdown(ctx); err != nil { - klog.Exit(err) + slog.Error("Failed to shutdown", "error", err) + os.Exit(1) } - klog.Exitf("ListenAndServe: %v", err) + slog.Error("ListenAndServe", "error", err) + os.Exit(1) } } @@ -136,10 +148,12 @@ func main() { // provided via flags. 
func storageConfigFromFlags() gcp.Config { if *bucket == "" { - klog.Exit("--bucket must be set") + slog.Error("--bucket must be set") + os.Exit(1) } if *spanner == "" { - klog.Exit("--spanner must be set") + slog.Error("--spanner must be set") + os.Exit(1) } return gcp.Config{ Bucket: *bucket, @@ -150,14 +164,16 @@ func storageConfigFromFlags() gcp.Config { func signerFromFlags() (note.Signer, []note.Signer) { s, err := note.NewSigner(*signer) if err != nil { - klog.Exitf("Failed to create new signer: %v", err) + slog.Error("Failed to create new signer", "error", err) + os.Exit(1) } var a []note.Signer for _, as := range additionalSigners { s, err := note.NewSigner(as) if err != nil { - klog.Exitf("Failed to create additional signer: %v", err) + slog.Error("Failed to create additional signer", "error", err) + os.Exit(1) } a = append(a, s) } diff --git a/cmd/conformance/gcp/otel.go b/cmd/conformance/gcp/otel.go index ed103a4dc..e31ad2924 100644 --- a/cmd/conformance/gcp/otel.go +++ b/cmd/conformance/gcp/otel.go @@ -25,9 +25,11 @@ import ( sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "log/slog" + "os" + mexporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric" texporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace" - "k8s.io/klog/v2" ) // initOTel initialises the open telemetry support for metrics and tracing. 
@@ -45,7 +47,7 @@ func initOTel(ctx context.Context, traceFraction float64) func(context.Context) } shutdownFuncs = nil if err != nil { - klog.Errorf("OTel shutdown: %v", err) + slog.Error("OTel shutdown", "error", err) } } @@ -60,12 +62,14 @@ func initOTel(ctx context.Context, traceFraction float64) func(context.Context) resource.WithDetectors(gcp.NewDetector()), ) if err != nil { - klog.Exitf("Failed to detect resources: %v", err) + slog.Error("Failed to detect resources", "error", err) + os.Exit(1) } me, err := mexporter.New() if err != nil { - klog.Exitf("Failed to create metric exporter: %v", err) + slog.Error("Failed to create metric exporter", "error", err) + os.Exit(1) return nil } // initialize a MeterProvider that periodically exports to the GCP exporter. @@ -78,7 +82,8 @@ func initOTel(ctx context.Context, traceFraction float64) func(context.Context) te, err := texporter.New() if err != nil { - klog.Exitf("Failed to create trace exporter: %v", err) + slog.Error("Failed to create trace exporter", "error", err) + os.Exit(1) return nil } // initialize a TracerProvier that periodically exports to the GCP exporter. diff --git a/internal/logger/gcp.go b/internal/logger/gcp.go new file mode 100644 index 000000000..422c07766 --- /dev/null +++ b/internal/logger/gcp.go @@ -0,0 +1,72 @@ +// Copyright 2026 The Tessera authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logger provides structured logging utilities. 
+package logger + +import ( + "context" + "log/slog" + "os" + + "go.opentelemetry.io/otel/trace" +) + +// GCPContextHandler is an slog.Handler that extracts OpenTelemetry tracing +// information from the context and adds it to the log record in the format +// expected by GCP Cloud Logging, allowing logs to be correlated with traces. +type GCPContextHandler struct { + slog.Handler + projectID string +} + +// NewGCPContextHandler wraps the provided slog.Handler. It injects GCP Cloud Logging +// compatible trace fields extracted from the context if a valid span is present. +func NewGCPContextHandler(h slog.Handler, projectID string) *GCPContextHandler { + if projectID == "" { + projectID = os.Getenv("GOOGLE_CLOUD_PROJECT") + } + return &GCPContextHandler{Handler: h, projectID: projectID} +} + +// Handle adds the trace ID, span ID, and sampled flag to the record attributes. +func (h *GCPContextHandler) Handle(ctx context.Context, r slog.Record) error { + span := trace.SpanContextFromContext(ctx) + if span.IsValid() { + // GCP Cloud Logging expects the trace ID to be formatted as: + // projects/[PROJECT_ID]/traces/[TRACE_ID] + // https://docs.cloud.google.com/logging/docs/structured-logging#structured_logging_special_fields + tracePath := span.TraceID().String() + if h.projectID != "" { + tracePath = "projects/" + h.projectID + "/traces/" + tracePath + } + + r.AddAttrs( + slog.String("logging.googleapis.com/trace", tracePath), + slog.String("logging.googleapis.com/spanId", span.SpanID().String()), + slog.Bool("logging.googleapis.com/trace_sampled", span.IsSampled()), + ) + } + return h.Handler.Handle(ctx, r) +} + +// WithAttrs returns a new handler with the given attributes, preserving the GCP handling. +func (h *GCPContextHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return &GCPContextHandler{Handler: h.Handler.WithAttrs(attrs), projectID: h.projectID} +} + +// WithGroup returns a new handler with the given group name, preserving the GCP handling. 
+func (h *GCPContextHandler) WithGroup(name string) slog.Handler { + return &GCPContextHandler{Handler: h.Handler.WithGroup(name), projectID: h.projectID} +} diff --git a/storage/gcp/antispam/gcp.go b/storage/gcp/antispam/gcp.go index bec41ab76..06bc5bd50 100644 --- a/storage/gcp/antispam/gcp.go +++ b/storage/gcp/antispam/gcp.go @@ -36,12 +36,13 @@ import ( adminpb "cloud.google.com/go/spanner/admin/database/apiv1/databasepb" "cloud.google.com/go/spanner/apiv1/spannerpb" + "log/slog" + "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/client" "github.com/transparency-dev/tessera/internal/otel" "go.opentelemetry.io/otel/trace" "google.golang.org/grpc/codes" - "k8s.io/klog/v2" ) const ( @@ -189,6 +190,7 @@ func (d *AntispamStorage) Decorator() func(f tessera.AddFn) tessera.AddFn { // // This implements tessera.Antispam. func (d *AntispamStorage) Follower(b func([]byte) ([][]byte, error)) tessera.Follower { + ctx := context.Background() f := &follower{ as: d, bundleHasher: b, @@ -203,7 +205,7 @@ func (d *AntispamStorage) Follower(b func([]byte) ([][]byte, error)) tessera.Fol r, _ := base64.StdEncoding.DecodeString(warn) gzr, _ := gzip.NewReader(bytes.NewReader([]byte(r))) w, _ := io.ReadAll(gzr) - klog.Warningf("%s\nWarning: you're running under the Spanner emulator - this is not a supported environment!\n\n", string(w)) + slog.WarnContext(ctx, string(w)+"\nWarning: you're running under the Spanner emulator - this is not a supported environment!\n\n") // Hack in a workaround for spannertest not supporting BatchWrites f.updateIndex = emulatorWorkaroundUpdateIndexTx @@ -376,7 +378,7 @@ func (f *follower) Follow(ctx context.Context, lr tessera.LogReader) { }) if err != nil { if err != errOutOfSync { - klog.Errorf("Failed to commit antispam population tx: %v", err) + slog.ErrorContext(ctx, "Failed to commit antispam population tx", "error", err) } stop() next = nil @@ -453,7 +455,7 @@ func createAndPrepareTables(ctx context.Context, spannerDB 
string, ddl []string, } defer func() { if err := adminClient.Close(); err != nil { - klog.Warningf("adminClient.Close(): %v", err) + slog.WarnContext(ctx, "adminClient.Close() failed", "error", err) } }() diff --git a/storage/gcp/gcp.go b/storage/gcp/gcp.go index fdf90ad77..0ff1bed99 100644 --- a/storage/gcp/gcp.go +++ b/storage/gcp/gcp.go @@ -48,6 +48,8 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + "log/slog" + gcs "cloud.google.com/go/storage" "github.com/google/go-cmp/cmp" "github.com/transparency-dev/merkle/rfc6962" @@ -65,7 +67,6 @@ import ( "google.golang.org/api/iterator" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "k8s.io/klog/v2" ) const ( @@ -347,7 +348,7 @@ func (a *Appender) integrateEntriesJob(ctx context.Context) { } return nil }); err != nil { - klog.Error(err) + slog.ErrorContext(ctx, "integrateEntriesJob failed", "error", err) } } } @@ -377,7 +378,7 @@ func (a *Appender) publishCheckpointJob(ctx context.Context, pubInterval, republ } return nil }); err != nil { - klog.Error(err) + slog.ErrorContext(ctx, "publishCheckpoint failed", "error", err) } } } @@ -421,7 +422,7 @@ func (a *Appender) garbageCollectorJob(ctx context.Context, i time.Duration) { } return nil }); err != nil { - klog.Warning(err) + slog.WarnContext(ctx, "garbageCollectTask failed", "error", err) } } @@ -464,7 +465,7 @@ func (a *Appender) updateCheckpoint(ctx context.Context, size uint64, root []byt return fmt.Errorf("writeCheckpoint: %v", err) } - klog.V(2).Infof("Created and stored latest checkpoint: %d, %x", size, root) + slog.DebugContext(ctx, "Created and stored latest checkpoint", "size", size, "root", fmt.Sprintf("%x", root)) return nil }) @@ -656,7 +657,7 @@ func integrate(ctx context.Context, fromSeq uint64, lh [][]byte, logStore *logRe if err := errG.Wait(); err != nil { return nil, err } - klog.V(1).Infof("New tree: %d, %x", newSize, newRoot) + slog.DebugContext(ctx, "New tree integrated", "size", newSize, 
"root", fmt.Sprintf("%x", newRoot)) return newRoot, nil }) @@ -709,20 +710,20 @@ func (a *Appender) updateEntryBundles(ctx context.Context, fromSeq uint64, entri numAdded++ if entriesInBundle == layout.EntryBundleWidth { // This bundle is full, so we need to write it out... - klog.V(1).Infof("In-memory bundle idx %d is full, attempting write to GCS", bundleIndex) + slog.DebugContext(ctx, "In-memory bundle is full, attempting write to GCS", "bundleIndex", bundleIndex) goSetEntryBundle(ctx, bundleIndex, 0, bundleWriter.Bytes()) // ... and prepare the next entry bundle for any remaining entries in the batch bundleIndex++ entriesInBundle = 0 // Don't use Reset/Truncate here - the backing []bytes is still being used by goSetEntryBundle above. bundleWriter = &bytes.Buffer{} - klog.V(1).Infof("Starting to fill in-memory bundle idx %d", bundleIndex) + slog.DebugContext(ctx, "Starting to fill in-memory bundle", "bundleIndex", bundleIndex) } } // If we have a partial bundle remaining once we've added all the entries from the batch, // this needs writing out too. if entriesInBundle > 0 { - klog.V(1).Infof("Attempting to write in-memory partial bundle idx %d.%d to GCS", bundleIndex, entriesInBundle) + slog.DebugContext(ctx, "Attempting to write in-memory partial bundle to GCS", "bundleIndex", bundleIndex, "entriesInBundle", entriesInBundle) goSetEntryBundle(ctx, bundleIndex, uint8(entriesInBundle), bundleWriter.Bytes()) } return seqErr.Wait() @@ -971,7 +972,7 @@ func (s *spannerCoordinator) consumeEntries(ctx context.Context, limit uint64, f endSeq = l } - klog.V(1).Infof("Consuming bundles start from %d to at most %d", fromSeq, endSeq-1) + slog.DebugContext(ctx, "Consuming bundles", "fromSeq", fromSeq, "toSeq", endSeq-1) span.AddEvent("Reading entries from sequence table") // Now read the sequenced starting at the index we got above. 
@@ -1010,7 +1011,7 @@ func (s *spannerCoordinator) consumeEntries(ctx context.Context, limit uint64, f orderCheck += int64(len(b)) } if len(seqsConsumed) == 0 && !forceUpdate { - klog.V(1).Info("Found no rows to sequence") + slog.DebugContext(ctx, "Found no rows to sequence") return nil } @@ -1111,7 +1112,7 @@ func (s *spannerCoordinator) publishCheckpoint(ctx context.Context, minStaleActi cpAge := time.Since(pubAt) if cpAge < minStaleActive { - klog.V(1).Infof("publishCheckpoint: last checkpoint published %s ago (< required %s), not publishing new checkpoint", cpAge, minStaleActive) + slog.DebugContext(ctx, "publishCheckpoint: last checkpoint published too recently, not publishing new checkpoint", "cpAge", cpAge, "minStaleActive", minStaleActive) outcomeAttrs = append(outcomeAttrs, errorTypeKey.String("skipped")) return nil } @@ -1140,12 +1141,12 @@ func (s *spannerCoordinator) publishCheckpoint(ctx context.Context, minStaleActi } if !shouldPublish { - klog.V(1).Infof("publishCheckpoint: skipping publish because tree hasn't grown and previous checkpoint is too recent") + slog.DebugContext(ctx, "publishCheckpoint: skipping publish because tree hasn't grown and previous checkpoint is too recent") outcomeAttrs = append(outcomeAttrs, errorTypeKey.String("skipped_no_growth")) return nil } - klog.V(1).Infof("publishCheckpoint: updating checkpoint (replacing %s old checkpoint)", cpAge) + slog.DebugContext(ctx, "publishCheckpoint: updating checkpoint", "cpAge", cpAge) span.AddEvent("Publishing checkpoint") if err := f(ctx, currentSize, rootHash); err != nil { @@ -1340,12 +1341,12 @@ func (s *gcsStorage) setObject(ctx context.Context, objName string, data []byte, } if !bytes.Equal(existing, data) { span.AddEvent("Non-idempotent write") - klog.Errorf("Resource %q non-idempotent write:\n%s", objName, cmp.Diff(existing, data)) + slog.ErrorContext(ctx, "Resource non-idempotent write", "objName", objName, "diff", cmp.Diff(existing, data)) return fmt.Errorf("precondition failed: 
resource content for %q differs from data to-be-written", objName) } span.AddEvent("Idempotent write") - klog.V(2).Infof("setObject: identical resource already exists for %q, continuing", objName) + slog.DebugContext(ctx, "setObject: identical resource already exists", "objName", objName) return nil } @@ -1375,7 +1376,7 @@ func (s *gcsStorage) deleteObjectsWithPrefix(ctx context.Context, objPrefix stri } return err } - klog.V(2).Infof("Deleting object %s", attr.Name) + slog.DebugContext(ctx, "Deleting object", "name", attr.Name) if err := bkt.Object(attr.Name).Delete(ctx); err != nil { errs = append(errs, err) } @@ -1458,16 +1459,16 @@ func (m *MigrationStorage) AwaitIntegration(ctx context.Context, sourceSize uint case <-t.C: from, _, err := m.sequencer.currentTree(ctx) if err != nil && !errors.Is(err, os.ErrNotExist) { - klog.Warningf("readTreeState: %v", err) + slog.WarnContext(ctx, "readTreeState failed", "error", err) continue } - klog.Infof("Integrate from %d (Target %d)", from, sourceSize) + slog.InfoContext(ctx, "Integrate", "from", from, "sourceSize", sourceSize) newSize, newRoot, err := m.buildTree(ctx, sourceSize) if err != nil { - klog.Warningf("integrate: %v", err) + slog.WarnContext(ctx, "integrate failed", "error", err) } if newSize == sourceSize { - klog.Infof("Integrated to %d with roothash %x", newSize, newRoot) + slog.InfoContext(ctx, "Integrated", "newSize", newSize, "newRoot", fmt.Sprintf("%x", newRoot)) return newRoot, nil } } @@ -1543,28 +1544,28 @@ func (m *MigrationStorage) buildTree(ctx context.Context, sourceSize uint64) (ui } from := uint64(fromSeq) - klog.V(1).Infof("Integrating from %d", from) + slog.DebugContext(ctx, "Integrating", "from", from) lh, err := m.fetchLeafHashes(ctx, from, sourceSize, sourceSize) if err != nil { return fmt.Errorf("fetchLeafHashes(%d, %d, %d): %v", from, sourceSize, sourceSize, err) } if len(lh) == 0 { - klog.Infof("Integrate: nothing to do, nothing done") + slog.InfoContext(ctx, "Integrate: nothing to do, 
nothing done") // Set these to the current state of the tree so we reflect that in buildTree's return values. newSize, newRoot = from, rootHash return nil } added := uint64(len(lh)) - klog.Infof("Integrate: adding %d entries to existing tree size %d", len(lh), from) + slog.InfoContext(ctx, "Integrate: adding entries to existing tree", "count", len(lh), "from", from) newRoot, err = integrate(ctx, from, lh, m.logStore) if err != nil { - klog.Warningf("integrate failed: %v", err) + slog.WarnContext(ctx, "integrate failed", "error", err) return fmt.Errorf("integrate failed: %v", err) } newSize = from + added - klog.Infof("Integrate: added %d entries", added) + slog.InfoContext(ctx, "Integrate: added entries", "added", added) // integration was successful, so we can update our coordination row m := make([]*spanner.Mutation, 0) @@ -1590,7 +1591,7 @@ func createAndPrepareTables(ctx context.Context, spannerDB string, ddl []string, } defer func() { if err := adminClient.Close(); err != nil { - klog.Warningf("adminClient.Close(): %v", err) + slog.WarnContext(ctx, "adminClient.Close() failed", "error", err) } }() From 2e5bc92f27b9d8766b18312446426975ba262f4f Mon Sep 17 00:00:00 2001 From: Martin Hutchinson Date: Tue, 3 Mar 2026 14:13:05 +0000 Subject: [PATCH 2/6] Change implicit key-values to explicit typed ones --- cmd/conformance/gcp/main.go | 16 +++++++------- cmd/conformance/gcp/otel.go | 8 +++---- storage/gcp/antispam/gcp.go | 4 ++-- storage/gcp/gcp.go | 44 ++++++++++++++++++------------------- 4 files changed, 36 insertions(+), 36 deletions(-) diff --git a/cmd/conformance/gcp/main.go b/cmd/conformance/gcp/main.go index 0b701dc4b..6e420fd66 100644 --- a/cmd/conformance/gcp/main.go +++ b/cmd/conformance/gcp/main.go @@ -70,7 +70,7 @@ func main() { gcpCfg := storageConfigFromFlags() driver, err := gcp.New(ctx, gcpCfg) if err != nil { - slog.Error("Failed to create new GCP storage", "error", err) + slog.Error("Failed to create new GCP storage", slog.Any("error", err)) 
os.Exit(1) } @@ -80,7 +80,7 @@ func main() { asOpts := gcp_as.AntispamOpts{} // Use defaults antispam, err = gcp_as.NewAntispam(ctx, fmt.Sprintf("%s-antispam", *spanner), asOpts) if err != nil { - slog.Error("Failed to create new GCP antispam storage", "error", err) + slog.Error("Failed to create new GCP antispam storage", slog.Any("error", err)) os.Exit(1) } } @@ -92,7 +92,7 @@ func main() { WithPushback(10*4096). WithAntispam(tessera.DefaultAntispamInMemorySize, antispam)) if err != nil { - slog.Error("Failed to append", "error", err) + slog.Error("Failed to append", slog.Any("error", err)) os.Exit(1) } @@ -130,16 +130,16 @@ func main() { ReadHeaderTimeout: 5 * time.Second, } if err := http2.ConfigureServer(h1s, h2s); err != nil { - slog.Error("http2.ConfigureServer", "error", err) + slog.Error("http2.ConfigureServer", slog.Any("error", err)) os.Exit(1) } if err := h1s.ListenAndServe(); err != nil { if err := shutdown(ctx); err != nil { - slog.Error("Failed to shutdown", "error", err) + slog.Error("Failed to shutdown", slog.Any("error", err)) os.Exit(1) } - slog.Error("ListenAndServe", "error", err) + slog.Error("ListenAndServe", slog.Any("error", err)) os.Exit(1) } } @@ -164,7 +164,7 @@ func storageConfigFromFlags() gcp.Config { func signerFromFlags() (note.Signer, []note.Signer) { s, err := note.NewSigner(*signer) if err != nil { - slog.Error("Failed to create new signer", "error", err) + slog.Error("Failed to create new signer", slog.Any("error", err)) os.Exit(1) } @@ -172,7 +172,7 @@ func signerFromFlags() (note.Signer, []note.Signer) { for _, as := range additionalSigners { s, err := note.NewSigner(as) if err != nil { - slog.Error("Failed to create additional signer", "error", err) + slog.Error("Failed to create additional signer", slog.Any("error", err)) os.Exit(1) } a = append(a, s) diff --git a/cmd/conformance/gcp/otel.go b/cmd/conformance/gcp/otel.go index e31ad2924..806db94d8 100644 --- a/cmd/conformance/gcp/otel.go +++ b/cmd/conformance/gcp/otel.go @@ 
-47,7 +47,7 @@ func initOTel(ctx context.Context, traceFraction float64) func(context.Context) } shutdownFuncs = nil if err != nil { - slog.Error("OTel shutdown", "error", err) + slog.Error("OTel shutdown", slog.Any("error", err)) } } @@ -62,13 +62,13 @@ func initOTel(ctx context.Context, traceFraction float64) func(context.Context) resource.WithDetectors(gcp.NewDetector()), ) if err != nil { - slog.Error("Failed to detect resources", "error", err) + slog.Error("Failed to detect resources", slog.Any("error", err)) os.Exit(1) } me, err := mexporter.New() if err != nil { - slog.Error("Failed to create metric exporter", "error", err) + slog.Error("Failed to create metric exporter", slog.Any("error", err)) os.Exit(1) return nil } @@ -82,7 +82,7 @@ func initOTel(ctx context.Context, traceFraction float64) func(context.Context) te, err := texporter.New() if err != nil { - slog.Error("Failed to create trace exporter", "error", err) + slog.Error("Failed to create trace exporter", slog.Any("error", err)) os.Exit(1) return nil } diff --git a/storage/gcp/antispam/gcp.go b/storage/gcp/antispam/gcp.go index 06bc5bd50..d68228df7 100644 --- a/storage/gcp/antispam/gcp.go +++ b/storage/gcp/antispam/gcp.go @@ -378,7 +378,7 @@ func (f *follower) Follow(ctx context.Context, lr tessera.LogReader) { }) if err != nil { if err != errOutOfSync { - slog.ErrorContext(ctx, "Failed to commit antispam population tx", "error", err) + slog.ErrorContext(ctx, "Failed to commit antispam population tx", slog.Any("error", err)) } stop() next = nil @@ -455,7 +455,7 @@ func createAndPrepareTables(ctx context.Context, spannerDB string, ddl []string, } defer func() { if err := adminClient.Close(); err != nil { - slog.WarnContext(ctx, "adminClient.Close() failed", "error", err) + slog.WarnContext(ctx, "adminClient.Close() failed", slog.Any("error", err)) } }() diff --git a/storage/gcp/gcp.go b/storage/gcp/gcp.go index 0ff1bed99..8a8a22f9f 100644 --- a/storage/gcp/gcp.go +++ b/storage/gcp/gcp.go @@ -348,7 
+348,7 @@ func (a *Appender) integrateEntriesJob(ctx context.Context) { } return nil }); err != nil { - slog.ErrorContext(ctx, "integrateEntriesJob failed", "error", err) + slog.ErrorContext(ctx, "integrateEntriesJob failed", slog.Any("error", err)) } } } @@ -422,7 +422,7 @@ func (a *Appender) garbageCollectorJob(ctx context.Context, i time.Duration) { } return nil }); err != nil { - slog.WarnContext(ctx, "garbageCollectTask failed", "error", err) + slog.WarnContext(ctx, "garbageCollectTask failed", slog.Any("error", err)) } } @@ -465,7 +465,7 @@ func (a *Appender) updateCheckpoint(ctx context.Context, size uint64, root []byt return fmt.Errorf("writeCheckpoint: %v", err) } - slog.DebugContext(ctx, "Created and stored latest checkpoint", "size", size, "root", fmt.Sprintf("%x", root)) + slog.DebugContext(ctx, "Created and stored latest checkpoint", slog.Uint64("size", size), slog.String("root", fmt.Sprintf("%x", root))) return nil }) @@ -657,7 +657,7 @@ func integrate(ctx context.Context, fromSeq uint64, lh [][]byte, logStore *logRe if err := errG.Wait(); err != nil { return nil, err } - slog.DebugContext(ctx, "New tree integrated", "size", newSize, "root", fmt.Sprintf("%x", newRoot)) + slog.DebugContext(ctx, "New tree integrated", slog.Uint64("size", newSize), slog.String("root", fmt.Sprintf("%x", newRoot))) return newRoot, nil }) @@ -710,20 +710,20 @@ func (a *Appender) updateEntryBundles(ctx context.Context, fromSeq uint64, entri numAdded++ if entriesInBundle == layout.EntryBundleWidth { // This bundle is full, so we need to write it out... - slog.DebugContext(ctx, "In-memory bundle is full, attempting write to GCS", "bundleIndex", bundleIndex) + slog.DebugContext(ctx, "In-memory bundle is full, attempting write to GCS", slog.Uint64("bundleIndex", bundleIndex)) goSetEntryBundle(ctx, bundleIndex, 0, bundleWriter.Bytes()) // ... 
and prepare the next entry bundle for any remaining entries in the batch bundleIndex++ entriesInBundle = 0 // Don't use Reset/Truncate here - the backing []bytes is still being used by goSetEntryBundle above. bundleWriter = &bytes.Buffer{} - slog.DebugContext(ctx, "Starting to fill in-memory bundle", "bundleIndex", bundleIndex) + slog.DebugContext(ctx, "Starting to fill in-memory bundle", slog.Uint64("bundleIndex", bundleIndex)) } } // If we have a partial bundle remaining once we've added all the entries from the batch, // this needs writing out too. if entriesInBundle > 0 { - slog.DebugContext(ctx, "Attempting to write in-memory partial bundle to GCS", "bundleIndex", bundleIndex, "entriesInBundle", entriesInBundle) + slog.DebugContext(ctx, "Attempting to write in-memory partial bundle to GCS", slog.Uint64("bundleIndex", bundleIndex), slog.Any("entriesInBundle", entriesInBundle)) goSetEntryBundle(ctx, bundleIndex, uint8(entriesInBundle), bundleWriter.Bytes()) } return seqErr.Wait() @@ -972,7 +972,7 @@ func (s *spannerCoordinator) consumeEntries(ctx context.Context, limit uint64, f endSeq = l } - slog.DebugContext(ctx, "Consuming bundles", "fromSeq", fromSeq, "toSeq", endSeq-1) + slog.DebugContext(ctx, "Consuming bundles", slog.Int64("fromSeq", fromSeq), slog.Int64("toSeq", endSeq-1)) span.AddEvent("Reading entries from sequence table") // Now read the sequenced starting at the index we got above. 
@@ -1112,7 +1112,7 @@ func (s *spannerCoordinator) publishCheckpoint(ctx context.Context, minStaleActi cpAge := time.Since(pubAt) if cpAge < minStaleActive { - slog.DebugContext(ctx, "publishCheckpoint: last checkpoint published too recently, not publishing new checkpoint", "cpAge", cpAge, "minStaleActive", minStaleActive) + slog.DebugContext(ctx, "publishCheckpoint: last checkpoint published too recently, not publishing new checkpoint", slog.Duration("cpAge", cpAge), slog.Duration("minStaleActive", minStaleActive)) outcomeAttrs = append(outcomeAttrs, errorTypeKey.String("skipped")) return nil } @@ -1146,7 +1146,7 @@ func (s *spannerCoordinator) publishCheckpoint(ctx context.Context, minStaleActi return nil } - slog.DebugContext(ctx, "publishCheckpoint: updating checkpoint", "cpAge", cpAge) + slog.DebugContext(ctx, "publishCheckpoint: updating checkpoint", slog.Duration("cpAge", cpAge)) span.AddEvent("Publishing checkpoint") if err := f(ctx, currentSize, rootHash); err != nil { @@ -1341,12 +1341,12 @@ func (s *gcsStorage) setObject(ctx context.Context, objName string, data []byte, } if !bytes.Equal(existing, data) { span.AddEvent("Non-idempotent write") - slog.ErrorContext(ctx, "Resource non-idempotent write", "objName", objName, "diff", cmp.Diff(existing, data)) + slog.ErrorContext(ctx, "Resource non-idempotent write", slog.String("objName", objName), slog.String("diff", cmp.Diff(existing, data))) return fmt.Errorf("precondition failed: resource content for %q differs from data to-be-written", objName) } span.AddEvent("Idempotent write") - slog.DebugContext(ctx, "setObject: identical resource already exists", "objName", objName) + slog.DebugContext(ctx, "setObject: identical resource already exists", slog.String("objName", objName)) return nil } @@ -1376,7 +1376,7 @@ func (s *gcsStorage) deleteObjectsWithPrefix(ctx context.Context, objPrefix stri } return err } - slog.DebugContext(ctx, "Deleting object", "name", attr.Name) + slog.DebugContext(ctx, "Deleting 
object", slog.String("name", attr.Name)) if err := bkt.Object(attr.Name).Delete(ctx); err != nil { errs = append(errs, err) } @@ -1459,16 +1459,16 @@ func (m *MigrationStorage) AwaitIntegration(ctx context.Context, sourceSize uint case <-t.C: from, _, err := m.sequencer.currentTree(ctx) if err != nil && !errors.Is(err, os.ErrNotExist) { - slog.WarnContext(ctx, "readTreeState failed", "error", err) + slog.WarnContext(ctx, "readTreeState failed", slog.Any("error", err)) continue } - slog.InfoContext(ctx, "Integrate", "from", from, "sourceSize", sourceSize) + slog.InfoContext(ctx, "Integrate", slog.Uint64("from", from), slog.Uint64("sourceSize", sourceSize)) newSize, newRoot, err := m.buildTree(ctx, sourceSize) if err != nil { - slog.WarnContext(ctx, "integrate failed", "error", err) + slog.WarnContext(ctx, "integrate failed", slog.Any("error", err)) } if newSize == sourceSize { - slog.InfoContext(ctx, "Integrated", "newSize", newSize, "newRoot", fmt.Sprintf("%x", newRoot)) + slog.InfoContext(ctx, "Integrated", slog.Uint64("newSize", newSize), slog.String("newRoot", fmt.Sprintf("%x", newRoot))) return newRoot, nil } } @@ -1544,7 +1544,7 @@ func (m *MigrationStorage) buildTree(ctx context.Context, sourceSize uint64) (ui } from := uint64(fromSeq) - slog.DebugContext(ctx, "Integrating", "from", from) + slog.DebugContext(ctx, "Integrating", slog.Uint64("from", from)) lh, err := m.fetchLeafHashes(ctx, from, sourceSize, sourceSize) if err != nil { return fmt.Errorf("fetchLeafHashes(%d, %d, %d): %v", from, sourceSize, sourceSize, err) @@ -1558,14 +1558,14 @@ func (m *MigrationStorage) buildTree(ctx context.Context, sourceSize uint64) (ui } added := uint64(len(lh)) - slog.InfoContext(ctx, "Integrate: adding entries to existing tree", "count", len(lh), "from", from) + slog.InfoContext(ctx, "Integrate: adding entries to existing tree", slog.Int("count", len(lh)), slog.Uint64("from", from)) newRoot, err = integrate(ctx, from, lh, m.logStore) if err != nil { - 
slog.WarnContext(ctx, "integrate failed", "error", err) + slog.WarnContext(ctx, "integrate failed", slog.Any("error", err)) return fmt.Errorf("integrate failed: %v", err) } newSize = from + added - slog.InfoContext(ctx, "Integrate: added entries", "added", added) + slog.InfoContext(ctx, "Integrate: added entries", slog.Uint64("added", added)) // integration was successful, so we can update our coordination row m := make([]*spanner.Mutation, 0) @@ -1591,7 +1591,7 @@ func createAndPrepareTables(ctx context.Context, spannerDB string, ddl []string, } defer func() { if err := adminClient.Close(); err != nil { - slog.WarnContext(ctx, "adminClient.Close() failed", "error", err) + slog.WarnContext(ctx, "adminClient.Close() failed", slog.Any("error", err)) } }() From 5ac047a9262eeb9cc616bebd92624d2aaec18e08 Mon Sep 17 00:00:00 2001 From: Martin Hutchinson Date: Tue, 3 Mar 2026 14:19:31 +0000 Subject: [PATCH 3/6] Lint usage of slog --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 69166c65c..6a115eb96 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -24,4 +24,4 @@ jobs: uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0 with: version: v2.10.1 - args: --timeout=8m + args: --timeout=8m --enable=sloglint From f5da216b1bf02ac86de2b382c22728388bd57e1f Mon Sep 17 00:00:00 2001 From: Martin Hutchinson Date: Tue, 3 Mar 2026 15:35:00 +0000 Subject: [PATCH 4/6] Default GCP project --- cmd/conformance/gcp/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/conformance/gcp/main.go b/cmd/conformance/gcp/main.go index 6e420fd66..439f51e6e 100644 --- a/cmd/conformance/gcp/main.go +++ b/cmd/conformance/gcp/main.go @@ -43,7 +43,7 @@ var ( signer = flag.String("signer", "", "Note signer to use to sign checkpoints") persistentAntispam = 
flag.Bool("antispam", false, "EXPERIMENTAL: Set to true to enable GCP-based persistent antispam storage") traceFraction = flag.Float64("trace_fraction", 0.01, "Fraction of open-telemetry span traces to sample") - projectID = flag.String("project", "", "GCP Project ID for Cloud Logging traces (optional)") + projectID = flag.String("project", os.Getenv("GOOGLE_CLOUD_PROJECT"), "GCP Project ID for Cloud Logging traces (optional)") additionalSigners = []string{} ) From e3c4e46dc873f007125e914d3ef7b6ea31e8ae50 Mon Sep 17 00:00:00 2001 From: Martin Hutchinson Date: Wed, 4 Mar 2026 14:09:20 +0000 Subject: [PATCH 5/6] Don't read the GCP project name from env --- cmd/conformance/gcp/main.go | 2 +- internal/logger/gcp.go | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/cmd/conformance/gcp/main.go b/cmd/conformance/gcp/main.go index 439f51e6e..6e420fd66 100644 --- a/cmd/conformance/gcp/main.go +++ b/cmd/conformance/gcp/main.go @@ -43,7 +43,7 @@ var ( signer = flag.String("signer", "", "Note signer to use to sign checkpoints") persistentAntispam = flag.Bool("antispam", false, "EXPERIMENTAL: Set to true to enable GCP-based persistent antispam storage") traceFraction = flag.Float64("trace_fraction", 0.01, "Fraction of open-telemetry span traces to sample") - projectID = flag.String("project", os.Getenv("GOOGLE_CLOUD_PROJECT"), "GCP Project ID for Cloud Logging traces (optional)") + projectID = flag.String("project", "", "GCP Project ID for Cloud Logging traces (optional)") additionalSigners = []string{} ) diff --git a/internal/logger/gcp.go b/internal/logger/gcp.go index 422c07766..5b7037770 100644 --- a/internal/logger/gcp.go +++ b/internal/logger/gcp.go @@ -18,7 +18,6 @@ package logger import ( "context" "log/slog" - "os" "go.opentelemetry.io/otel/trace" ) @@ -34,9 +33,6 @@ type GCPContextHandler struct { // NewGCPContextHandler wraps the provided slog.Handler. 
It injects GCP Cloud Logging // compatible trace fields extracted from the context if a valid span is present. func NewGCPContextHandler(h slog.Handler, projectID string) *GCPContextHandler { - if projectID == "" { - projectID = os.Getenv("GOOGLE_CLOUD_PROJECT") - } return &GCPContextHandler{Handler: h, projectID: projectID} } From 1293420046171629133b940d30187992300ef2e2 Mon Sep 17 00:00:00 2001 From: Martin Hutchinson Date: Thu, 5 Mar 2026 15:22:06 +0000 Subject: [PATCH 6/6] Top-level docs --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 4d5fbead6..53c3b6be1 100644 --- a/README.md +++ b/README.md @@ -357,6 +357,12 @@ This allows applications built with Tessera to block until leaves passed via cal > [!Tip] > This is useful if e.g. your application needs to return an inclusion proof in response to a request to add an entry to the log. +### Logging + +Tessera utilizes the standard Go `log/slog` package for structured logging. By default, log events are emitted with their corresponding severity levels and key-value properties. + +Personalities and operators of the log can customize how these logs are handled by configuring the default `slog.Handler`. For example, operators deploying to Google Cloud can set up an `slog.Handler` that automatically correlates logs with [OpenTelemetry distributed traces](https://cloud.google.com/logging/docs/structured-logging) using the context-aware properties from the incoming `http.Request`. The example programs in `/cmd/` provide reference initializations of these handlers for their respective environments. + ## Lifecycles ### Appender