@@ -23,7 +23,6 @@ namespace phi {
 namespace funcs {
 
 using ScopedTensorDescriptor = phi::backends::gpu::ScopedTensorDescriptor;
-using GpuDataLayout = phi::backends::gpu::DataLayout;
 template <typename T>
 using CudnnDataType = phi::backends::gpu::CudnnDataType<T>;
 
@@ -36,9 +35,9 @@ void SoftmaxCUDNNFunctor<T, DeviceContext>::operator()(
   ScopedTensorDescriptor xDesc;
   ScopedTensorDescriptor yDesc;
   std::vector<int> cudnn_tensor_dims = common::vectorize<int>(X->dims());
-  GpuDataLayout layout = GpuDataLayout::kNCHW;
+  DataLayout layout = DataLayout::NCHW;
   if (cudnn_tensor_dims.size() == 5) {
-    layout = GpuDataLayout::kNCDHW;
+    layout = DataLayout::NCDHW;
   }
   // NOTE(*) : cudnn softmax only support >= 4D phi::DenseTensor,
   // fill 1 at unused dims
@@ -89,9 +88,9 @@ void SoftmaxGradCUDNNFunctor<T, DeviceContext>::operator()(
   ScopedTensorDescriptor dyDesc;
   ScopedTensorDescriptor dxDesc;
   std::vector<int> cudnn_tensor_dims = common::vectorize<int>(Y->dims());
-  GpuDataLayout layout = GpuDataLayout::kNCHW;
+  DataLayout layout = DataLayout::NCHW;
   if (cudnn_tensor_dims.size() == 5) {
-    layout = DataLayout::NCDHW;
+  }
   // NOTE(*) : cudnn softmax only support >= 4D phi::DenseTensor,
   // fill 1 at unused dims
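
For context, a minimal standalone sketch of the logic these hunks touch: pick NCDHW for 5-D tensors and NCHW otherwise, then fill 1 at the unused dims so the tensor meets cuDNN softmax's >= 4-D requirement. The local DataLayout enum and the main driver below are illustrative only; they mirror the phi::DataLayout values named in the diff rather than Paddle's actual API.

#include <cstddef>
#include <iostream>
#include <vector>

// Illustrative stand-in for the phi::DataLayout values used in the diff.
enum class DataLayout { NCHW, NCDHW };

int main() {
  std::vector<int> cudnn_tensor_dims = {8, 16, 32};  // a 3-D input, for example

  // Layout selection, matching the replaced lines above.
  DataLayout layout =
      cudnn_tensor_dims.size() == 5 ? DataLayout::NCDHW : DataLayout::NCHW;

  // cuDNN softmax only supports >= 4-D tensors, so fill 1 at unused dims.
  std::vector<int> dims(cudnn_tensor_dims.size() == 5 ? 5 : 4, 1);
  for (std::size_t i = 0; i < cudnn_tensor_dims.size() && i < dims.size(); ++i) {
    dims[i] = cudnn_tensor_dims[i];
  }

  std::cout << (layout == DataLayout::NCHW ? "NCHW" : "NCDHW") << ":";
  for (int d : dims) std::cout << ' ' << d;  // prints "NCHW: 8 16 32 1"
  std::cout << '\n';
}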