4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,10 @@

## Unreleased

**Breaking Changes**:

- Switch Statsd histogram metrics to distribution metrics. ([#5378](https://github.com/getsentry/relay/pull/5378))

**Features**:

- Support comparing release versions without build code. ([#5376](https://github.com/getsentry/relay/pull/5376))
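The breaking change above is mechanical on the code side: every `histogram(...)` key in the `metric!` macro becomes `distribution(...)`, and every `HistogramMetric` impl becomes a `DistributionMetric` impl, as the hunks below show. For StatsD consumers the affected metrics change their metric type (typically `|h` to `|d` in DogStatsD wire format), which is why the entry is flagged as breaking. A minimal sketch of the pattern; the `ExampleDistributions` enum, its metric name, and the `topic` tag are illustrative only and not part of this PR:

```rust
use relay_statsd::{DistributionMetric, metric};

/// Illustrative metric enum, shown only to demonstrate the migration pattern.
pub enum ExampleDistributions {
    /// Size of some payload in bytes.
    PayloadSize,
}

impl DistributionMetric for ExampleDistributions {
    fn name(&self) -> &'static str {
        match *self {
            Self::PayloadSize => "example.payload_size",
        }
    }
}

fn emit(payload: &[u8]) {
    // Before this PR the value would have been reported as a histogram:
    //     metric!(histogram(ExampleHistograms::PayloadSize) = payload.len() as u64);
    // After the switch the same value is reported as a distribution:
    metric!(
        distribution(ExampleDistributions::PayloadSize) = payload.len() as u64,
        topic = "example",
    );
}
```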
6 changes: 3 additions & 3 deletions relay-cardinality/src/redis/limiter.rs
@@ -12,7 +12,7 @@ use crate::{
script::{CardinalityScript, CardinalityScriptResult, Status},
state::{LimitState, RedisEntry},
},
statsd::{CardinalityLimiterHistograms, CardinalityLimiterTimers},
statsd::{CardinalityLimiterDistributions, CardinalityLimiterTimers},
};
use relay_common::time::UnixTimestamp;

@@ -78,7 +78,7 @@ impl RedisSetLimiter {
}

metric!(
histogram(CardinalityLimiterHistograms::RedisCheckHashes) = num_hashes,
distribution(CardinalityLimiterDistributions::RedisCheckHashes) = num_hashes,
id = state.id(),
);

@@ -90,7 +90,7 @@
.zip(results)
.inspect(|(_, result)| {
metric!(
histogram(CardinalityLimiterHistograms::RedisSetCardinality) =
distribution(CardinalityLimiterDistributions::RedisSetCardinality) =
result.cardinality as u64,
id = state.id(),
);
6 changes: 3 additions & 3 deletions relay-cardinality/src/statsd.rs
@@ -1,6 +1,6 @@
use relay_statsd::TimerMetric;
#[cfg(feature = "redis")]
use relay_statsd::{CounterMetric, HistogramMetric, SetMetric};
use relay_statsd::{CounterMetric, DistributionMetric, SetMetric};

/// Counter metrics for the Relay Cardinality Limiter.
#[cfg(feature = "redis")]
@@ -87,7 +87,7 @@ impl TimerMetric for CardinalityLimiterTimers {
}

#[cfg(feature = "redis")]
pub enum CardinalityLimiterHistograms {
pub enum CardinalityLimiterDistributions {
/// Amount of hashes sent to Redis to check the cardinality.
///
/// This metric is tagged with:
@@ -103,7 +103,7 @@ pub enum CardinalityLimiterHistograms {
}

#[cfg(feature = "redis")]
impl HistogramMetric for CardinalityLimiterHistograms {
impl DistributionMetric for CardinalityLimiterDistributions {
fn name(&self) -> &'static str {
match *self {
#[cfg(feature = "redis")]
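Assembled from the truncated hunks above, the renamed enum and its trait impl take roughly the following shape; the metric name strings and the doc comment on the second variant are not visible in the diff, so they are assumptions here:

```rust
use relay_statsd::DistributionMetric;

/// Distribution metrics for the Relay Cardinality Limiter.
#[cfg(feature = "redis")]
pub enum CardinalityLimiterDistributions {
    /// Amount of hashes sent to Redis to check the cardinality.
    RedisCheckHashes,
    /// Cardinality of a Redis set as reported by the check script (doc text assumed).
    RedisSetCardinality,
}

#[cfg(feature = "redis")]
impl DistributionMetric for CardinalityLimiterDistributions {
    fn name(&self) -> &'static str {
        match *self {
            // Metric name strings below are assumptions; the diff does not show them.
            Self::RedisCheckHashes => "cardinality.limiter.redis.check_hashes",
            Self::RedisSetCardinality => "cardinality.limiter.redis.set_cardinality",
        }
    }
}
```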
4 changes: 2 additions & 2 deletions relay-kafka/src/producer/mod.rs
@@ -18,7 +18,7 @@ use crate::config::{KafkaParams, KafkaTopic};
use crate::debounced::Debounced;
use crate::limits::KafkaRateLimits;
use crate::producer::utils::KafkaHeaders;
use crate::statsd::{KafkaCounters, KafkaGauges, KafkaHistograms};
use crate::statsd::{KafkaCounters, KafkaDistributions, KafkaGauges};

mod utils;
use utils::{Context, ThreadedProducer};
@@ -217,7 +217,7 @@ impl Producer {
let producer_name = producer.context().producer_name();

metric!(
histogram(KafkaHistograms::KafkaMessageSize) = payload.len() as u64,
distribution(KafkaDistributions::KafkaMessageSize) = payload.len() as u64,
variant = variant,
topic = topic_name,
producer_name = producer_name
6 changes: 3 additions & 3 deletions relay-kafka/src/statsd.rs
@@ -32,7 +32,7 @@
//! # Results in: producer_name="unknown"
//! ```

use relay_statsd::{CounterMetric, GaugeMetric, HistogramMetric};
use relay_statsd::{CounterMetric, DistributionMetric, GaugeMetric};

pub enum KafkaCounters {
/// Number of messages that failed to be enqueued in the Kafka producer's memory buffer.
@@ -84,7 +84,7 @@ impl CounterMetric for KafkaCounters {
}
}

pub enum KafkaHistograms {
pub enum KafkaDistributions {
/// Size of emitted kafka message in bytes.
///
/// This metric is tagged with:
@@ -94,7 +94,7 @@ pub enum KafkaHistograms {
KafkaMessageSize,
}

impl HistogramMetric for KafkaHistograms {
impl DistributionMetric for KafkaDistributions {
fn name(&self) -> &'static str {
match self {
Self::KafkaMessageSize => "kafka.message_size",
4 changes: 2 additions & 2 deletions relay-server/src/endpoints/common.rs
@@ -14,7 +14,7 @@ use crate::service::ServiceState;
use crate::services::buffer::ProjectKeyPair;
use crate::services::outcome::{DiscardItemType, DiscardReason, Outcome};
use crate::services::processor::{BucketSource, MetricData, ProcessMetrics};
use crate::statsd::{RelayCounters, RelayHistograms};
use crate::statsd::{RelayCounters, RelayDistributions};
use crate::utils::{self, ApiErrorResponse, FormDataIter};

#[derive(Clone, Copy, Debug, thiserror::Error)]
@@ -380,7 +380,7 @@ fn emit_envelope_metrics(envelope: &Envelope) {
let is_container = if item.is_container() { "true" } else { "false" };

metric!(
histogram(RelayHistograms::EnvelopeItemSize) = item.payload().len() as u64,
distribution(RelayDistributions::EnvelopeItemSize) = item.payload().len() as u64,
item_type = item_type,
is_container = is_container,
);
6 changes: 3 additions & 3 deletions relay-server/src/processing/utils/event.rs
@@ -36,8 +36,8 @@ use crate::envelope::{Envelope, EnvelopeHeaders, Item};
use crate::processing::Context;
use crate::services::processor::{MINIMUM_CLOCK_DRIFT, ProcessingError};
use crate::services::projects::project::ProjectInfo;
use crate::statsd::{RelayCounters, RelayHistograms, RelayTimers};
use crate::utils::{self};
use crate::statsd::{RelayCounters, RelayDistributions, RelayTimers};
use crate::utils;

/// Returns the data category if there is an event.
///
@@ -130,7 +130,7 @@ pub fn finalize<'a>(

let span_count = inner_event.spans.value().map(Vec::len).unwrap_or(0) as u64;
metric!(
histogram(RelayHistograms::EventSpans) = span_count,
distribution(RelayDistributions::EventSpans) = span_count,
sdk = client_name,
platform = platform,
);
6 changes: 3 additions & 3 deletions relay-server/src/services/buffer/envelope_buffer/mod.rs
@@ -20,7 +20,7 @@ use crate::services::buffer::envelope_store::sqlite::SqliteEnvelopeStoreError;
use crate::services::buffer::stack_provider::memory::MemoryStackProvider;
use crate::services::buffer::stack_provider::sqlite::SqliteStackProvider;
use crate::services::buffer::stack_provider::{StackCreationType, StackProvider};
use crate::statsd::{RelayGauges, RelayHistograms, RelayTimers};
use crate::statsd::{RelayDistributions, RelayGauges, RelayTimers};
use crate::utils::MemoryChecker;

/// Polymorphic envelope buffering interface.
@@ -79,7 +79,7 @@ impl PolymorphicEnvelopeBuffer {
/// Adds an envelope to the buffer.
pub async fn push(&mut self, envelope: Box<Envelope>) -> Result<(), EnvelopeBufferError> {
relay_statsd::metric!(
histogram(RelayHistograms::BufferEnvelopeBodySize) =
distribution(RelayDistributions::BufferEnvelopeBodySize) =
envelope.items().map(Item::len).sum::<usize>() as u64,
partition_id = self.partition_tag()
);
@@ -576,7 +576,7 @@
false => "false",
};
relay_statsd::metric!(
histogram(RelayHistograms::BufferEnvelopesCount) = total_count,
distribution(RelayDistributions::BufferEnvelopesCount) = total_count,
initialized = initialized,
stack_type = self.stack_provider.stack_type(),
partition_id = &self.partition_tag
6 changes: 3 additions & 3 deletions relay-server/src/services/buffer/envelope_store/sqlite.rs
@@ -8,7 +8,7 @@ use crate::envelope::EnvelopeError;

use crate::Envelope;
use crate::services::buffer::common::ProjectKeyPair;
use crate::statsd::{RelayGauges, RelayHistograms, RelayTimers};
use crate::statsd::{RelayDistributions, RelayGauges, RelayTimers};
use bytes::{Buf, Bytes};
use chrono::{DateTime, Utc};
use futures::stream::StreamExt;
@@ -141,15 +141,15 @@ impl<'a> TryFrom<&'a Envelope> for DatabaseEnvelope {

let serialized_envelope = value.to_vec()?;
relay_statsd::metric!(
histogram(RelayHistograms::BufferEnvelopeSize) = serialized_envelope.len() as u64
distribution(RelayDistributions::BufferEnvelopeSize) = serialized_envelope.len() as u64
);

let encoded_envelope =
relay_statsd::metric!(timer(RelayTimers::BufferEnvelopeCompression), {
zstd::encode_all(serialized_envelope.as_slice(), Self::COMPRESSION_LEVEL)?
});
relay_statsd::metric!(
histogram(RelayHistograms::BufferEnvelopeSizeCompressed) =
distribution(RelayDistributions::BufferEnvelopeSizeCompressed) =
encoded_envelope.len() as u64
);

20 changes: 13 additions & 7 deletions relay-server/src/services/processor.rs
@@ -67,7 +67,7 @@ use crate::services::projects::project::{ProjectInfo, ProjectState};
use crate::services::upstream::{
SendRequest, Sign, SignatureType, UpstreamRelay, UpstreamRequest, UpstreamRequestError,
};
use crate::statsd::{RelayCounters, RelayHistograms, RelayTimers};
use crate::statsd::{RelayCounters, RelayDistributions, RelayTimers};
use crate::utils::{self, CheckLimits, EnvelopeLimiter, SamplingResult};
use crate::{http, processing};
use relay_threading::AsyncPool;
@@ -2770,7 +2770,7 @@ impl EnvelopeProcessorService {
let dsn = PartialDsn::outbound(scoping, upstream);

relay_statsd::metric!(
histogram(RelayHistograms::PartitionKeys) = u64::from(partition_key)
distribution(RelayDistributions::PartitionKeys) = u64::from(partition_key)
);

let mut num_batches = 0;
@@ -2789,14 +2789,16 @@
.scope(*scoping);

relay_statsd::metric!(
histogram(RelayHistograms::BucketsPerBatch) = batch.len() as u64
distribution(RelayDistributions::BucketsPerBatch) = batch.len() as u64
);

self.submit_upstream(cogs, Submit::Envelope(envelope.into_processed()));
num_batches += 1;
}

relay_statsd::metric!(histogram(RelayHistograms::BatchesPerPartition) = num_batches);
relay_statsd::metric!(
distribution(RelayDistributions::BatchesPerPartition) = num_batches
);
}
}

@@ -2874,7 +2876,7 @@ impl EnvelopeProcessorService {
}

if partition_splits > 0 {
metric!(histogram(RelayHistograms::PartitionSplits) = partition_splits);
metric!(distribution(RelayDistributions::PartitionSplits) = partition_splits);
}

self.send_global_partition(partition_key, &mut partition);
@@ -3139,7 +3141,9 @@ impl UpstreamRequest for SendEnvelope {

fn build(&mut self, builder: &mut http::RequestBuilder) -> Result<(), http::HttpError> {
let envelope_body = self.body.clone();
metric!(histogram(RelayHistograms::UpstreamEnvelopeBodySize) = envelope_body.len() as u64);
metric!(
distribution(RelayDistributions::UpstreamEnvelopeBodySize) = envelope_body.len() as u64
);

let meta = &self.envelope.meta();
let shard = self.envelope.partition_key().map(|p| p.to_string());
@@ -3355,7 +3359,9 @@ impl UpstreamRequest for SendMetricsRequest {
}

fn build(&mut self, builder: &mut http::RequestBuilder) -> Result<(), http::HttpError> {
metric!(histogram(RelayHistograms::UpstreamMetricsBodySize) = self.encoded.len() as u64);
metric!(
distribution(RelayDistributions::UpstreamMetricsBodySize) = self.encoded.len() as u64
);

builder
.content_encoding(self.http_encoding)
7 changes: 4 additions & 3 deletions relay-server/src/services/projects/cache/state.rs
@@ -12,7 +12,7 @@ use relay_statsd::metric;

use crate::services::projects::project::{ProjectState, Revision};
use crate::services::projects::source::SourceProjectState;
use crate::statsd::{RelayHistograms, RelayTimers};
use crate::statsd::{RelayDistributions, RelayTimers};
use crate::utils::{RetryBackoff, UniqueScheduledQueue};

/// The backing storage for a project cache.
@@ -90,11 +90,12 @@ impl ProjectStore {
};

metric!(
histogram(RelayHistograms::ProjectStateCacheSize) = self.shared.projects.len() as u64,
distribution(RelayDistributions::ProjectStateCacheSize) =
self.shared.projects.len() as u64,
storage = "shared"
);
metric!(
histogram(RelayHistograms::ProjectStateCacheSize) = self.private.len() as u64,
distribution(RelayDistributions::ProjectStateCacheSize) = self.private.len() as u64,
storage = "private"
);

6 changes: 3 additions & 3 deletions relay-server/src/services/projects/source/redis.rs
@@ -7,7 +7,7 @@ use std::sync::Arc;

use crate::services::projects::project::{ParsedProjectState, ProjectState, Revision};
use crate::services::projects::source::SourceProjectState;
use crate::statsd::{RelayCounters, RelayHistograms, RelayTimers};
use crate::statsd::{RelayCounters, RelayDistributions, RelayTimers};
use relay_redis::redis::cmd;

#[derive(Clone, Debug)]
@@ -33,11 +33,11 @@ fn parse_redis_response(raw_response: &[u8]) -> Result<ParsedProjectState, Redis
let decoded_response = match &decompression_result {
Ok(decoded) => {
metric!(
histogram(RelayHistograms::ProjectStateSizeBytesCompressed) =
distribution(RelayDistributions::ProjectStateSizeBytesCompressed) =
raw_response.len() as f64
);
metric!(
histogram(RelayHistograms::ProjectStateSizeBytesDecompressed) =
distribution(RelayDistributions::ProjectStateSizeBytesDecompressed) =
decoded.len() as f64
);
decoded.as_slice()
18 changes: 11 additions & 7 deletions relay-server/src/services/projects/source/upstream.rs
@@ -24,7 +24,7 @@ use crate::services::projects::source::{FetchProjectState, SourceProjectState};
use crate::services::upstream::{
Method, RequestPriority, SendQuery, UpstreamQuery, UpstreamRelay, UpstreamRequestError,
};
use crate::statsd::{RelayCounters, RelayHistograms, RelayTimers};
use crate::statsd::{RelayCounters, RelayDistributions, RelayTimers};
use crate::utils::{RetryBackoff, SleepHandle};

/// A query to retrieve a batch of project states from upstream.
@@ -289,7 +289,7 @@ impl UpstreamProjectSourceService {
.filter(|(id, channel)| {
if channel.expired() {
metric!(
histogram(RelayHistograms::ProjectStateAttempts) = channel.attempts,
distribution(RelayDistributions::ProjectStateAttempts) = channel.attempts,
result = "timeout",
);
metric!(
@@ -315,7 +315,10 @@

let total_count = cache_channels.len() + nocache_channels.len();

metric!(histogram(RelayHistograms::ProjectStatePending) = self.state_channels.len() as u64);
metric!(
distribution(RelayDistributions::ProjectStatePending) =
self.state_channels.len() as u64
);

relay_log::debug!(
"updating project states for {}/{} projects (attempt {})",
@@ -373,7 +376,7 @@ impl UpstreamProjectSourceService {
}
relay_log::debug!("sending request of size {}", channels_batch.len());
metric!(
histogram(RelayHistograms::ProjectStateRequestBatchSize) =
distribution(RelayDistributions::ProjectStateRequestBatchSize) =
channels_batch.len() as u64
);

@@ -449,7 +452,7 @@ impl UpstreamProjectSourceService {

// Count number of project states returned (via http requests).
metric!(
histogram(RelayHistograms::ProjectStateReceived) =
distribution(RelayDistributions::ProjectStateReceived) =
response.configs.len() as u64
);
for (key, mut channel) in channels_batch {
@@ -484,7 +487,8 @@
};

metric!(
histogram(RelayHistograms::ProjectStateAttempts) = channel.attempts,
distribution(RelayDistributions::ProjectStateAttempts) =
channel.attempts,
result = result,
);
metric!(
@@ -515,7 +519,7 @@
}

metric!(
histogram(RelayHistograms::ProjectStatePending) =
distribution(RelayDistributions::ProjectStatePending) =
self.state_channels.len() as u64
);
// Put the channels back into the queue, we will retry again shortly.