diff --git a/compressai/entropy_models/entropy_models.py b/compressai/entropy_models/entropy_models.py
index acb98333..e43538dd 100644
--- a/compressai/entropy_models/entropy_models.py
+++ b/compressai/entropy_models/entropy_models.py
@@ -196,7 +196,9 @@ def dequantize(
 
     @classmethod
     def _dequantize(cls, inputs: Tensor, means: Optional[Tensor] = None) -> Tensor:
-        warnings.warn("_dequantize. Use dequantize instead.", stacklevel=2)
+        warnings.warn(
+            "_dequantize is deprecated. Use dequantize instead.", stacklevel=2
+        )
         return cls.dequantize(inputs, means)
 
     def _pmf_to_cdf(self, pmf, tail_mass, pmf_length, max_length):
@@ -395,7 +397,7 @@ def _get_medians(self) -> Tensor:
 
     def update(self, force: bool = False, update_quantiles: bool = False) -> bool:
         # Check if we need to update the bottleneck parameters, the offsets are
-        # only computed and stored when the conditonal model is update()'d.
+        # only computed and stored when the conditional model is update()'d.
         if self._offset.numel() > 0 and not force:
             return False
 
@@ -437,7 +439,7 @@ def loss(self) -> Tensor:
         return loss
 
     def _logits_cumulative(self, inputs: Tensor, stop_gradient: bool) -> Tensor:
-        # TorchScript not yet working (nn.Mmodule indexing not supported)
+        # TorchScript not yet working (nn.Module indexing not supported)
         logits = inputs
         for i in range(len(self.filters) + 1):
             matrix = self.matrices[i]