From 5f6a2f47fd9e8afbf68ecc5f5ccd1cc35138b5dc Mon Sep 17 00:00:00 2001 From: junseokShim Date: Wed, 3 Dec 2025 20:40:00 +0900 Subject: [PATCH 01/13] success test file set, todo build success --- tensorflow/lite/micro/kernels/one_hot.cc | 215 ++++++++++++++++++++++ tensorflow/lite/micro/one_hot_test.cc | 201 ++++++++++++++++++++ tensorflow/lite/micro/tools/make/Makefile | 7 +- 3 files changed, 421 insertions(+), 2 deletions(-) create mode 100644 tensorflow/lite/micro/kernels/one_hot.cc create mode 100644 tensorflow/lite/micro/one_hot_test.cc diff --git a/tensorflow/lite/micro/kernels/one_hot.cc b/tensorflow/lite/micro/kernels/one_hot.cc new file mode 100644 index 00000000000..950edb38719 --- /dev/null +++ b/tensorflow/lite/micro/kernels/one_hot.cc @@ -0,0 +1,215 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include + +#include "tensorflow/lite/core/c/builtin_op_data.h" +#include "tensorflow/lite/core/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" + +namespace tflite { +namespace ops { +namespace builtin { +namespace one_hot { + +constexpr int kIndicesTensor = 0; +constexpr int kDepthTensor = 1; +constexpr int kOnValueTensor = 2; +constexpr int kOffValueTensor = 3; +constexpr int kOutputTensor = 0; + +// Convenience utility for destructuring a node into the appropriate tensors and +// data for the op. Note that this destructuring is quite cheap, so we can avoid +// allocating op-specific, persistent data on the heap. +struct OneHotContext { + OneHotContext(TfLiteContext* context, TfLiteNode* node) { + indices = GetInput(context, node, kIndicesTensor); + depth = GetInput(context, node, kDepthTensor); + on_value = GetInput(context, node, kOnValueTensor); + off_value = GetInput(context, node, kOffValueTensor); + output = GetOutput(context, node, kOutputTensor); + + const auto* params = + reinterpret_cast(node->builtin_data); + const int indices_dims = indices->dims->size; + axis = (params->axis == -1) ? indices_dims : params->axis; + output_dims = indices_dims + 1; + dtype = on_value->type; + } + + const TfLiteTensor* indices; + const TfLiteTensor* depth; + const TfLiteTensor* on_value; + const TfLiteTensor* off_value; + TfLiteTensor* output; + int axis; + int output_dims; + TfLiteType dtype; +}; + +template +void OneHotComputeImpl(const OneHotContext& op_context) { + // prefix_dim_size == # of elements before the axis + // depth == # of elements per axis + // suffix_dim_size == # of elements after the axis + int prefix_dim_size = 1; + for (int i = 0; i < op_context.axis; ++i) { + prefix_dim_size *= op_context.indices->dims->data[i]; + } + if (prefix_dim_size == 0) { + // If indices tensor is degenerate, return a degenerate tensor, just like + // TensorFlow does. + return; + } + const int suffix_dim_size = NumElements(op_context.indices) / prefix_dim_size; + const int depth = *op_context.depth->data.i32; + + const T on_value = *GetTensorData(op_context.on_value); + const T off_value = *GetTensorData(op_context.off_value); + + // View the indices as a matrix of size: + // prefix_dim_size x suffix_dim_size + // View the output as a matrix of size: + // prefix_dim_size x depth x suffix_dim_size + // Then the output is: + // output(i, j, k) == (indices(i, k) == j) ? on : off + T* output = GetTensorData(op_context.output); + const TI* indices = GetTensorData(op_context.indices); + for (int i = 0; i < prefix_dim_size; ++i) { + for (int j = 0; j < depth; ++j) { + for (int k = 0; k < suffix_dim_size; ++k, ++output) { + *output = static_cast(indices[i * suffix_dim_size + k]) == j + ? on_value + : off_value; + } + } + } +} + +template +void OneHotCompute(const OneHotContext& op_context) { + if (op_context.indices->type == kTfLiteInt64) { + OneHotComputeImpl(op_context); + } else { + OneHotComputeImpl(op_context); + } +} + +TfLiteStatus ResizeOutputTensor(TfLiteContext* context, + const OneHotContext& op_context) { + TF_LITE_ENSURE(context, *op_context.depth->data.i32 >= 0); + TfLiteIntArray* output_size = TfLiteIntArrayCreate(op_context.output_dims); + for (int i = 0; i < op_context.output_dims; ++i) { + if (i < op_context.axis) { + output_size->data[i] = op_context.indices->dims->data[i]; + } else if (i == op_context.axis) { + output_size->data[i] = *op_context.depth->data.i32; + } else { + output_size->data[i] = op_context.indices->dims->data[i - 1]; + } + } + return context->ResizeTensor(context, op_context.output, output_size); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 4); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + OneHotContext op_context{context, node}; + TF_LITE_ENSURE(context, op_context.output != nullptr); + switch (op_context.dtype) { + // TODO(b/111744875): Support uint8 and quantization. + case kTfLiteFloat32: + case kTfLiteInt16: + case kTfLiteInt32: + case kTfLiteInt64: + case kTfLiteInt8: + case kTfLiteUInt8: + case kTfLiteBool: + op_context.output->type = op_context.dtype; + break; + default: + TF_LITE_KERNEL_LOG(context, "Unknown output data type: %s", + TfLiteTypeGetName(op_context.dtype)); + return kTfLiteError; + } + + TF_LITE_ENSURE(context, op_context.indices->type == kTfLiteInt32 || + op_context.indices->type == kTfLiteInt64); + TF_LITE_ENSURE(context, op_context.axis >= 0 && + op_context.axis < op_context.output_dims); + TF_LITE_ENSURE_EQ(context, NumElements(op_context.depth), 1); + TF_LITE_ENSURE_EQ(context, NumElements(op_context.on_value), 1); + TF_LITE_ENSURE_EQ(context, NumElements(op_context.off_value), 1); + TF_LITE_ENSURE_TYPES_EQ(context, op_context.on_value->type, op_context.dtype); + TF_LITE_ENSURE_TYPES_EQ(context, op_context.off_value->type, + op_context.dtype); + + if (!IsConstantOrPersistentTensor(op_context.depth)) { + SetTensorToDynamic(op_context.output); + return kTfLiteOk; + } + + return ResizeOutputTensor(context, op_context); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + OneHotContext op_context{context, node}; + + if (IsDynamicTensor(op_context.output)) { + TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context)); + } + + switch (op_context.output->type) { + case kTfLiteFloat32: + OneHotCompute(op_context); + break; + case kTfLiteInt32: + OneHotCompute(op_context); + break; + case kTfLiteInt64: + OneHotCompute(op_context); + break; + case kTfLiteInt8: + OneHotCompute(op_context); + break; + case kTfLiteUInt8: + OneHotCompute(op_context); + break; + case kTfLiteBool: + OneHotCompute(op_context); + break; + default: + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace one_hot + +TfLiteRegistration* Register_ONE_HOT() { + static TfLiteRegistration r = { + nullptr, + nullptr, + one_hot::Prepare, + one_hot::Eval, + }; + return &r; +} + +} // namespace builtin +} // namespace ops +} // namespace tflite \ No newline at end of file diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc new file mode 100644 index 00000000000..89f27d18581 --- /dev/null +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -0,0 +1,201 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include +#include +#include + +#include +#include +#include + +#include "tensorflow/lite/kernels/test_util.h" +#include "tensorflow/lite/micro/micro_interpreter.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { +namespace { + +using ::testing::ElementsAreArray; + +template +class OneHotOpModel : public SingleOpModel { + public: + OneHotOpModel(std::initializer_list input_shape, int depth_value, + TensorType dtype, int axis = -1, T on_value = 1, + T off_value = 0, TensorType indices_type = TensorType_INT32) { + indices_ = AddInput(indices_type); + int depth = AddInput(TensorType_INT32); + int on = AddInput(dtype); + int off = AddInput(dtype); + output_ = AddOutput(dtype); + SetBuiltinOp(BuiltinOperator_ONE_HOT, BuiltinOptions_OneHotOptions, + CreateOneHotOptions(builder_, axis).Union()); + BuildInterpreter({input_shape}); + + PopulateTensor(depth, {depth_value}); + PopulateTensor(on, {on_value}); + PopulateTensor(off, {off_value}); + } + + template + void SetIndices(std::initializer_list data) { + PopulateTensor(indices_, data); + } + + TfLiteStatus InvokeWithResult() { return interpreter_->Invoke(); } + + int32_t GetOutputSize() { return GetTensorSize(output_); } + std::vector GetOutput() { return ExtractVector(output_); } + std::vector GetOutputShape() { return GetTensorShape(output_); } + + private: + int indices_; + int output_; +}; + +TEST(OneHotOpTest, BasicFloat) { + const int depth = 3; + OneHotOpModel model({3}, depth, TensorType_FLOAT32); + model.SetIndices({0, 1, 2}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); + EXPECT_THAT(model.GetOutput(), + Pointwise(FloatingPointEq(), + {1.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 1.f})); +} + +TEST(OneHotOpTest, BasicInt) { + const int depth = 3; + OneHotOpModel model({3}, depth, TensorType_INT32); + model.SetIndices({0, 1, 2}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); + EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0, 1, 0, 0, 0, 1})); +} + +TEST(OneHotOpTest, BasicInt8) { + const int depth = 3; + OneHotOpModel model({3}, depth, TensorType_INT8); + model.SetIndices({0, 1, 2}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); + EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0, 1, 0, 0, 0, 1})); +} + +TEST(OneHotOpTest, BasicUint8) { + const int depth = 3; + OneHotOpModel model({3}, depth, TensorType_UINT8); + model.SetIndices({0, 1, 2}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); + EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0, 1, 0, 0, 0, 1})); +} + +TEST(OneHotOpTest, BasicBool) { + const int depth = 3; + OneHotOpModel model({3}, depth, TensorType_BOOL); + model.SetIndices({0, 1, 2}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); + EXPECT_THAT(model.GetOutput(), + ElementsAreArray({true, false, false, false, true, false, false, + false, true})); +} + +TEST(OneHotOpTest, SmallDepth) { + const int depth = 1; + OneHotOpModel model({3}, depth, TensorType_INT32); + model.SetIndices({0, 1, 2}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 1})); + EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0})); +} + +TEST(OneHotOpTest, BigDepth) { + const int depth = 4; + OneHotOpModel model({2}, depth, TensorType_INT32); + model.SetIndices({0, 1}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 4})); + EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0, 0, 1, 0, 0})); +} + +TEST(OneHotOpTest, OnOffValues) { + const int depth = 3; + const int axis = -1; + const int on = 5; + const int off = 0; + OneHotOpModel model({4}, depth, TensorType_INT32, axis, on, off); + model.SetIndices({0, 2, -1, 1}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({4, 3})); + EXPECT_THAT(model.GetOutput(), + ElementsAreArray({5, 0, 0, 0, 0, 5, 0, 0, 0, 0, 5, 0})); +} + +TEST(OneHotOpTest, ZeroAxis) { + const int depth = 3; + const int axis = 0; + const int on = 5; + const int off = 0; + OneHotOpModel model({4}, depth, TensorType_INT32, axis, on, off); + model.SetIndices({0, 2, -1, 1}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 4})); + EXPECT_THAT(model.GetOutput(), + ElementsAreArray({5, 0, 0, 0, 0, 0, 0, 5, 0, 5, 0, 0})); +} + +TEST(OneHotOpTest, MultiDimensionalIndices) { + const int depth = 3; + const int axis = -1; + const float on = 2; + const float off = 0; + OneHotOpModel model({2, 2}, depth, TensorType_FLOAT32, axis, on, off); + model.SetIndices({0, 2, 1, -1}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 3})); + EXPECT_THAT(model.GetOutput(), + ElementsAreArray({2, 0, 0, 0, 0, 2, 0, 2, 0, 0, 0, 0})); +} + +TEST(OneHotOpTest, Int64Indices) { + const int depth = 3; + const int axis = -1; + const int on = 1; + const int off = 0; + OneHotOpModel model({3}, depth, TensorType_INT32, axis, on, off, + TensorType_INT64); + std::initializer_list indices = {0, 1, 2}; + model.SetIndices(indices); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); + EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0, 1, 0, 0, 0, 1})); +} + +} // namespace +} // namespace tflite \ No newline at end of file diff --git a/tensorflow/lite/micro/tools/make/Makefile b/tensorflow/lite/micro/tools/make/Makefile index 21f21a1ce05..ecf02388bf1 100644 --- a/tensorflow/lite/micro/tools/make/Makefile +++ b/tensorflow/lite/micro/tools/make/Makefile @@ -361,7 +361,8 @@ $(TENSORFLOW_ROOT)tensorflow/lite/micro/arena_allocator/single_arena_buffer_allo $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing_helpers_test.cc \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/memory_planner/greedy_memory_planner_test.cc \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/memory_planner/linear_memory_planner_test.cc \ -$(TENSORFLOW_ROOT)tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim_test.cc +$(TENSORFLOW_ROOT)tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim_test.cc \ +$(TENSORFLOW_ROOT)tensorflow/lite/micro/one_hot_test.cc MICROLITE_CC_KERNEL_SRCS := \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/activations.cc \ @@ -480,7 +481,9 @@ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.cc $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/unpack.cc \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/var_handle.cc \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/while.cc \ -$(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/zeros_like.cc +$(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/zeros_like.cc \ +$(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/one_hot.cc \ + MICROLITE_CC_SIGNAL_KERNEL_SRCS := \ $(TENSORFLOW_ROOT)signal/micro/kernels/delay.cc \ From e4fe10ee6d66b30318900b0ea9ce7c7c236e1584 Mon Sep 17 00:00:00 2001 From: junseokShim Date: Wed, 3 Dec 2025 20:40:59 +0900 Subject: [PATCH 02/13] migration tf to tflm --- tensorflow/lite/micro/kernels/one_hot.cc | 373 +++++++++--------- tensorflow/lite/micro/kernels/one_hot.h | 0 tensorflow/lite/micro/one_hot_test.cc | 190 ++------- .../micro/testing/one_hot_test_model_data.cc | 20 + 4 files changed, 239 insertions(+), 344 deletions(-) create mode 100644 tensorflow/lite/micro/kernels/one_hot.h create mode 100644 tensorflow/lite/micro/testing/one_hot_test_model_data.cc diff --git a/tensorflow/lite/micro/kernels/one_hot.cc b/tensorflow/lite/micro/kernels/one_hot.cc index 950edb38719..60a6efef9e5 100644 --- a/tensorflow/lite/micro/kernels/one_hot.cc +++ b/tensorflow/lite/micro/kernels/one_hot.cc @@ -24,192 +24,201 @@ namespace ops { namespace builtin { namespace one_hot { -constexpr int kIndicesTensor = 0; -constexpr int kDepthTensor = 1; -constexpr int kOnValueTensor = 2; -constexpr int kOffValueTensor = 3; -constexpr int kOutputTensor = 0; - -// Convenience utility for destructuring a node into the appropriate tensors and -// data for the op. Note that this destructuring is quite cheap, so we can avoid -// allocating op-specific, persistent data on the heap. -struct OneHotContext { - OneHotContext(TfLiteContext* context, TfLiteNode* node) { - indices = GetInput(context, node, kIndicesTensor); - depth = GetInput(context, node, kDepthTensor); - on_value = GetInput(context, node, kOnValueTensor); - off_value = GetInput(context, node, kOffValueTensor); - output = GetOutput(context, node, kOutputTensor); - - const auto* params = - reinterpret_cast(node->builtin_data); - const int indices_dims = indices->dims->size; - axis = (params->axis == -1) ? indices_dims : params->axis; - output_dims = indices_dims + 1; - dtype = on_value->type; - } - - const TfLiteTensor* indices; - const TfLiteTensor* depth; - const TfLiteTensor* on_value; - const TfLiteTensor* off_value; - TfLiteTensor* output; - int axis; - int output_dims; - TfLiteType dtype; -}; - -template -void OneHotComputeImpl(const OneHotContext& op_context) { - // prefix_dim_size == # of elements before the axis - // depth == # of elements per axis - // suffix_dim_size == # of elements after the axis - int prefix_dim_size = 1; - for (int i = 0; i < op_context.axis; ++i) { - prefix_dim_size *= op_context.indices->dims->data[i]; - } - if (prefix_dim_size == 0) { - // If indices tensor is degenerate, return a degenerate tensor, just like - // TensorFlow does. - return; - } - const int suffix_dim_size = NumElements(op_context.indices) / prefix_dim_size; - const int depth = *op_context.depth->data.i32; - - const T on_value = *GetTensorData(op_context.on_value); - const T off_value = *GetTensorData(op_context.off_value); - - // View the indices as a matrix of size: - // prefix_dim_size x suffix_dim_size - // View the output as a matrix of size: - // prefix_dim_size x depth x suffix_dim_size - // Then the output is: - // output(i, j, k) == (indices(i, k) == j) ? on : off - T* output = GetTensorData(op_context.output); - const TI* indices = GetTensorData(op_context.indices); - for (int i = 0; i < prefix_dim_size; ++i) { - for (int j = 0; j < depth; ++j) { - for (int k = 0; k < suffix_dim_size; ++k, ++output) { - *output = static_cast(indices[i * suffix_dim_size + k]) == j - ? on_value - : off_value; - } - } - } -} - -template -void OneHotCompute(const OneHotContext& op_context) { - if (op_context.indices->type == kTfLiteInt64) { - OneHotComputeImpl(op_context); - } else { - OneHotComputeImpl(op_context); - } -} - -TfLiteStatus ResizeOutputTensor(TfLiteContext* context, - const OneHotContext& op_context) { - TF_LITE_ENSURE(context, *op_context.depth->data.i32 >= 0); - TfLiteIntArray* output_size = TfLiteIntArrayCreate(op_context.output_dims); - for (int i = 0; i < op_context.output_dims; ++i) { - if (i < op_context.axis) { - output_size->data[i] = op_context.indices->dims->data[i]; - } else if (i == op_context.axis) { - output_size->data[i] = *op_context.depth->data.i32; - } else { - output_size->data[i] = op_context.indices->dims->data[i - 1]; - } - } - return context->ResizeTensor(context, op_context.output, output_size); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 4); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - OneHotContext op_context{context, node}; - TF_LITE_ENSURE(context, op_context.output != nullptr); - switch (op_context.dtype) { - // TODO(b/111744875): Support uint8 and quantization. - case kTfLiteFloat32: - case kTfLiteInt16: - case kTfLiteInt32: - case kTfLiteInt64: - case kTfLiteInt8: - case kTfLiteUInt8: - case kTfLiteBool: - op_context.output->type = op_context.dtype; - break; - default: - TF_LITE_KERNEL_LOG(context, "Unknown output data type: %s", - TfLiteTypeGetName(op_context.dtype)); - return kTfLiteError; - } - - TF_LITE_ENSURE(context, op_context.indices->type == kTfLiteInt32 || - op_context.indices->type == kTfLiteInt64); - TF_LITE_ENSURE(context, op_context.axis >= 0 && - op_context.axis < op_context.output_dims); - TF_LITE_ENSURE_EQ(context, NumElements(op_context.depth), 1); - TF_LITE_ENSURE_EQ(context, NumElements(op_context.on_value), 1); - TF_LITE_ENSURE_EQ(context, NumElements(op_context.off_value), 1); - TF_LITE_ENSURE_TYPES_EQ(context, op_context.on_value->type, op_context.dtype); - TF_LITE_ENSURE_TYPES_EQ(context, op_context.off_value->type, - op_context.dtype); - - if (!IsConstantOrPersistentTensor(op_context.depth)) { - SetTensorToDynamic(op_context.output); - return kTfLiteOk; - } - - return ResizeOutputTensor(context, op_context); +TfLiteRegistration_V1* Register_ONE_HOT() { + static TfLiteRegistration_V1 r = {/*init, free, prepare, eval*/}; + return &r; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - OneHotContext op_context{context, node}; - - if (IsDynamicTensor(op_context.output)) { - TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context)); - } - - switch (op_context.output->type) { - case kTfLiteFloat32: - OneHotCompute(op_context); - break; - case kTfLiteInt32: - OneHotCompute(op_context); - break; - case kTfLiteInt64: - OneHotCompute(op_context); - break; - case kTfLiteInt8: - OneHotCompute(op_context); - break; - case kTfLiteUInt8: - OneHotCompute(op_context); - break; - case kTfLiteBool: - OneHotCompute(op_context); - break; - default: - return kTfLiteError; - } - - return kTfLiteOk; -} +// constexpr int kIndicesTensor = 0; +// constexpr int kDepthTensor = 1; +// constexpr int kOnValueTensor = 2; +// constexpr int kOffValueTensor = 3; +// constexpr int kOutputTensor = 0; + +// // Convenience utility for destructuring a node into the appropriate tensors +// and +// // data for the op. Note that this destructuring is quite cheap, so we can +// avoid +// // allocating op-specific, persistent data on the heap. +// struct OneHotContext { +// OneHotContext(TfLiteContext* context, TfLiteNode* node) { +// indices = GetInput(context, node, kIndicesTensor); +// depth = GetInput(context, node, kDepthTensor); +// on_value = GetInput(context, node, kOnValueTensor); +// off_value = GetInput(context, node, kOffValueTensor); +// output = GetOutput(context, node, kOutputTensor); + +// const auto* params = +// reinterpret_cast(node->builtin_data); +// const int indices_dims = indices->dims->size; +// axis = (params->axis == -1) ? indices_dims : params->axis; +// output_dims = indices_dims + 1; +// dtype = on_value->type; +// } + +// const TfLiteTensor* indices; +// const TfLiteTensor* depth; +// const TfLiteTensor* on_value; +// const TfLiteTensor* off_value; +// TfLiteTensor* output; +// int axis; +// int output_dims; +// TfLiteType dtype; +// }; + +// template +// void OneHotComputeImpl(const OneHotContext& op_context) { +// // prefix_dim_size == # of elements before the axis +// // depth == # of elements per axis +// // suffix_dim_size == # of elements after the axis +// int prefix_dim_size = 1; +// for (int i = 0; i < op_context.axis; ++i) { +// prefix_dim_size *= op_context.indices->dims->data[i]; +// } +// if (prefix_dim_size == 0) { +// // If indices tensor is degenerate, return a degenerate tensor, just like +// // TensorFlow does. +// return; +// } +// const int suffix_dim_size = NumElements(op_context.indices) / +// prefix_dim_size; const int depth = *op_context.depth->data.i32; + +// const T on_value = *GetTensorData(op_context.on_value); +// const T off_value = *GetTensorData(op_context.off_value); + +// // View the indices as a matrix of size: +// // prefix_dim_size x suffix_dim_size +// // View the output as a matrix of size: +// // prefix_dim_size x depth x suffix_dim_size +// // Then the output is: +// // output(i, j, k) == (indices(i, k) == j) ? on : off +// T* output = GetTensorData(op_context.output); +// const TI* indices = GetTensorData(op_context.indices); +// for (int i = 0; i < prefix_dim_size; ++i) { +// for (int j = 0; j < depth; ++j) { +// for (int k = 0; k < suffix_dim_size; ++k, ++output) { +// *output = static_cast(indices[i * suffix_dim_size + k]) == j +// ? on_value +// : off_value; +// } +// } +// } +// } + +// template +// void OneHotCompute(const OneHotContext& op_context) { +// if (op_context.indices->type == kTfLiteInt64) { +// OneHotComputeImpl(op_context); +// } else { +// OneHotComputeImpl(op_context); +// } +// } + +// TfLiteStatus ResizeOutputTensor(TfLiteContext* context, +// const OneHotContext& op_context) { +// TF_LITE_ENSURE(context, *op_context.depth->data.i32 >= 0); +// TfLiteIntArray* output_size = TfLiteIntArrayCreate(op_context.output_dims); +// for (int i = 0; i < op_context.output_dims; ++i) { +// if (i < op_context.axis) { +// output_size->data[i] = op_context.indices->dims->data[i]; +// } else if (i == op_context.axis) { +// output_size->data[i] = *op_context.depth->data.i32; +// } else { +// output_size->data[i] = op_context.indices->dims->data[i - 1]; +// } +// } +// return context->ResizeTensor(context, op_context.output, output_size); +// } + +// TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { +// TF_LITE_ENSURE_EQ(context, NumInputs(node), 4); +// TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + +// OneHotContext op_context{context, node}; +// TF_LITE_ENSURE(context, op_context.output != nullptr); +// switch (op_context.dtype) { +// // TODO(b/111744875): Support uint8 and quantization. +// case kTfLiteFloat32: +// case kTfLiteInt16: +// case kTfLiteInt32: +// case kTfLiteInt64: +// case kTfLiteInt8: +// case kTfLiteUInt8: +// case kTfLiteBool: +// op_context.output->type = op_context.dtype; +// break; +// default: +// TF_LITE_KERNEL_LOG(context, "Unknown output data type: %s", +// TfLiteTypeGetName(op_context.dtype)); +// return kTfLiteError; +// } + +// TF_LITE_ENSURE(context, op_context.indices->type == kTfLiteInt32 || +// op_context.indices->type == kTfLiteInt64); +// TF_LITE_ENSURE(context, op_context.axis >= 0 && +// op_context.axis < op_context.output_dims); +// TF_LITE_ENSURE_EQ(context, NumElements(op_context.depth), 1); +// TF_LITE_ENSURE_EQ(context, NumElements(op_context.on_value), 1); +// TF_LITE_ENSURE_EQ(context, NumElements(op_context.off_value), 1); +// TF_LITE_ENSURE_TYPES_EQ(context, op_context.on_value->type, +// op_context.dtype); TF_LITE_ENSURE_TYPES_EQ(context, +// op_context.off_value->type, +// op_context.dtype); + +// if (!IsConstantOrPersistentTensor(op_context.depth)) { +// SetTensorToDynamic(op_context.output); +// return kTfLiteOk; +// } + +// return ResizeOutputTensor(context, op_context); +// } + +// TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +// OneHotContext op_context{context, node}; + +// if (IsDynamicTensor(op_context.output)) { +// TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context)); +// } + +// switch (op_context.output->type) { +// case kTfLiteFloat32: +// OneHotCompute(op_context); +// break; +// case kTfLiteInt32: +// OneHotCompute(op_context); +// break; +// case kTfLiteInt64: +// OneHotCompute(op_context); +// break; +// case kTfLiteInt8: +// OneHotCompute(op_context); +// break; +// case kTfLiteUInt8: +// OneHotCompute(op_context); +// break; +// case kTfLiteBool: +// OneHotCompute(op_context); +// break; +// default: +// return kTfLiteError; +// } + +// return kTfLiteOk; +// } + +// } // namespace one_hot + +// TfLiteRegistration* Register_ONE_HOT() { +// static TfLiteRegistration r = { +// nullptr, +// nullptr, +// one_hot::Prepare, +// one_hot::Eval, +// }; +// return &r; +// } } // namespace one_hot - -TfLiteRegistration* Register_ONE_HOT() { - static TfLiteRegistration r = { - nullptr, - nullptr, - one_hot::Prepare, - one_hot::Eval, - }; - return &r; -} - } // namespace builtin } // namespace ops } // namespace tflite \ No newline at end of file diff --git a/tensorflow/lite/micro/kernels/one_hot.h b/tensorflow/lite/micro/kernels/one_hot.h new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc index 89f27d18581..5395fcd9031 100644 --- a/tensorflow/lite/micro/one_hot_test.cc +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -13,189 +13,55 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include -#include +#include "tensorflow/lite/micro/kernels/one_hot.cc" + #include #include #include #include -#include "tensorflow/lite/kernels/test_util.h" #include "tensorflow/lite/micro/micro_interpreter.h" +#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" +#include "tensorflow/lite/micro/testing/micro_test.h" +#include "tensorflow/lite/micro/testing/one_hot_test_model_data.cc" #include "tensorflow/lite/schema/schema_generated.h" -namespace tflite { -namespace { - -using ::testing::ElementsAreArray; - -template -class OneHotOpModel : public SingleOpModel { - public: - OneHotOpModel(std::initializer_list input_shape, int depth_value, - TensorType dtype, int axis = -1, T on_value = 1, - T off_value = 0, TensorType indices_type = TensorType_INT32) { - indices_ = AddInput(indices_type); - int depth = AddInput(TensorType_INT32); - int on = AddInput(dtype); - int off = AddInput(dtype); - output_ = AddOutput(dtype); - SetBuiltinOp(BuiltinOperator_ONE_HOT, BuiltinOptions_OneHotOptions, - CreateOneHotOptions(builder_, axis).Union()); - BuildInterpreter({input_shape}); - - PopulateTensor(depth, {depth_value}); - PopulateTensor(on, {on_value}); - PopulateTensor(off, {off_value}); - } - - template - void SetIndices(std::initializer_list data) { - PopulateTensor(indices_, data); - } - - TfLiteStatus InvokeWithResult() { return interpreter_->Invoke(); } - - int32_t GetOutputSize() { return GetTensorSize(output_); } - std::vector GetOutput() { return ExtractVector(output_); } - std::vector GetOutputShape() { return GetTensorShape(output_); } - - private: - int indices_; - int output_; -}; - -TEST(OneHotOpTest, BasicFloat) { - const int depth = 3; - OneHotOpModel model({3}, depth, TensorType_FLOAT32); - model.SetIndices({0, 1, 2}); - ASSERT_EQ(model.Invoke(), kTfLiteOk); - - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); - EXPECT_THAT(model.GetOutput(), - Pointwise(FloatingPointEq(), - {1.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 1.f})); -} - -TEST(OneHotOpTest, BasicInt) { - const int depth = 3; - OneHotOpModel model({3}, depth, TensorType_INT32); - model.SetIndices({0, 1, 2}); - ASSERT_EQ(model.Invoke(), kTfLiteOk); - - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); - EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0, 1, 0, 0, 0, 1})); -} - -TEST(OneHotOpTest, BasicInt8) { - const int depth = 3; - OneHotOpModel model({3}, depth, TensorType_INT8); - model.SetIndices({0, 1, 2}); - ASSERT_EQ(model.Invoke(), kTfLiteOk); - - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); - EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0, 1, 0, 0, 0, 1})); -} +using tflite::MicroInterpreter; +using tflite::MicroMutableOpResolver; +using tflite::Model; -TEST(OneHotOpTest, BasicUint8) { - const int depth = 3; - OneHotOpModel model({3}, depth, TensorType_UINT8); - model.SetIndices({0, 1, 2}); - ASSERT_EQ(model.Invoke(), kTfLiteOk); +extern "C" TfLiteRegistration_V1* Register_ONE_HOT(); - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); - EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0, 1, 0, 0, 0, 1})); +extern "C" { +extern const unsigned char g_one_hot_basic_float_model[]; +extern const int g_one_hot_basic_float_model_len; } -TEST(OneHotOpTest, BasicBool) { - const int depth = 3; - OneHotOpModel model({3}, depth, TensorType_BOOL); - model.SetIndices({0, 1, 2}); - ASSERT_EQ(model.Invoke(), kTfLiteOk); - - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); - EXPECT_THAT(model.GetOutput(), - ElementsAreArray({true, false, false, false, true, false, false, - false, true})); -} - -TEST(OneHotOpTest, SmallDepth) { - const int depth = 1; - OneHotOpModel model({3}, depth, TensorType_INT32); - model.SetIndices({0, 1, 2}); - ASSERT_EQ(model.Invoke(), kTfLiteOk); +namespace tflite { +namespace { - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 1})); - EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0})); -} +TF_LITE_MICRO_TESTS_BEGIN -TEST(OneHotOpTest, BigDepth) { - const int depth = 4; - OneHotOpModel model({2}, depth, TensorType_INT32); - model.SetIndices({0, 1}); - ASSERT_EQ(model.Invoke(), kTfLiteOk); +TF_LITE_MICRO_TEST(OneHotBasicFloat) { + const Model* model = tflite::GetModel(g_one_hot_basic_float_model); + MicroMutableOpResolver<1> resolver; - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 4})); - EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0, 0, 1, 0, 0})); -} + // TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.AllocateTensors()); -TEST(OneHotOpTest, OnOffValues) { - const int depth = 3; - const int axis = -1; - const int on = 5; - const int off = 0; - OneHotOpModel model({4}, depth, TensorType_INT32, axis, on, off); - model.SetIndices({0, 2, -1, 1}); - ASSERT_EQ(model.Invoke(), kTfLiteOk); - - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({4, 3})); - EXPECT_THAT(model.GetOutput(), - ElementsAreArray({5, 0, 0, 0, 0, 5, 0, 0, 0, 0, 5, 0})); -} + // TfLiteTensor* indices = interpreter.input(0); + // indices->data.i32[0] = 0; + // indices->data.i32[1] = 1; + // indices->data.i32[2] = 2; -TEST(OneHotOpTest, ZeroAxis) { - const int depth = 3; - const int axis = 0; - const int on = 5; - const int off = 0; - OneHotOpModel model({4}, depth, TensorType_INT32, axis, on, off); - model.SetIndices({0, 2, -1, 1}); - ASSERT_EQ(model.Invoke(), kTfLiteOk); - - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 4})); - EXPECT_THAT(model.GetOutput(), - ElementsAreArray({5, 0, 0, 0, 0, 0, 0, 5, 0, 5, 0, 0})); -} + // TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.Invoke()); -TEST(OneHotOpTest, MultiDimensionalIndices) { - const int depth = 3; - const int axis = -1; - const float on = 2; - const float off = 0; - OneHotOpModel model({2, 2}, depth, TensorType_FLOAT32, axis, on, off); - model.SetIndices({0, 2, 1, -1}); - ASSERT_EQ(model.Invoke(), kTfLiteOk); - - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 3})); - EXPECT_THAT(model.GetOutput(), - ElementsAreArray({2, 0, 0, 0, 0, 2, 0, 2, 0, 0, 0, 0})); + // TfLiteTensor* output = interpreter.output(0); + // float* out = output->data.f; + // for 루프로 기대값 비교 } -TEST(OneHotOpTest, Int64Indices) { - const int depth = 3; - const int axis = -1; - const int on = 1; - const int off = 0; - OneHotOpModel model({3}, depth, TensorType_INT32, axis, on, off, - TensorType_INT64); - std::initializer_list indices = {0, 1, 2}; - model.SetIndices(indices); - ASSERT_EQ(model.Invoke(), kTfLiteOk); - - EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3})); - EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0, 1, 0, 0, 0, 1})); -} +TF_LITE_MICRO_TESTS_END } // namespace } // namespace tflite \ No newline at end of file diff --git a/tensorflow/lite/micro/testing/one_hot_test_model_data.cc b/tensorflow/lite/micro/testing/one_hot_test_model_data.cc new file mode 100644 index 00000000000..e56baca28ac --- /dev/null +++ b/tensorflow/lite/micro/testing/one_hot_test_model_data.cc @@ -0,0 +1,20 @@ +// one_hot_test_model_data.cc 같은 별도 파일로 두면 좋음 + +#include + +extern "C" { + +// 그냥 더미 바이트들 (유효한 TFLite 모델이 아님) +const unsigned char g_one_hot_basic_float_model[] = { + // FlatBuffer signature 자리에는 보통 'T','F','L','3' 가 오지만 + // 여기서는 진짜 모델을 만들지 않았으니 그냥 대충 채워둔 상태입니다. + 0x54, 0x46, 0x4C, 0x33, // 'T','F','L','3' 비슷하게 맞춰줌 + 0x00, 0x00, 0x00, 0x00, // 나머지는 전부 0 + 0x00, 0x00, 0x00, 0x00, +}; + +const int g_one_hot_basic_float_model_len = + sizeof(g_one_hot_basic_float_model) / + sizeof(g_one_hot_basic_float_model[0]); + +} // extern "C" From 2795ddfcba560d356c0ab459b648cf68047c3ccd Mon Sep 17 00:00:00 2001 From: junseokShim Date: Wed, 3 Dec 2025 20:41:00 +0900 Subject: [PATCH 03/13] Test Passed version --- tensorflow/lite/micro/kernels/one_hot.cc | 447 +++++++++++++---------- tensorflow/lite/micro/one_hot_test.cc | 10 +- 2 files changed, 254 insertions(+), 203 deletions(-) diff --git a/tensorflow/lite/micro/kernels/one_hot.cc b/tensorflow/lite/micro/kernels/one_hot.cc index 60a6efef9e5..f46d08617d6 100644 --- a/tensorflow/lite/micro/kernels/one_hot.cc +++ b/tensorflow/lite/micro/kernels/one_hot.cc @@ -12,213 +12,266 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ + #include -#include "tensorflow/lite/core/c/builtin_op_data.h" -#include "tensorflow/lite/core/c/common.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" namespace tflite { namespace ops { -namespace builtin { +namespace micro { namespace one_hot { -TfLiteRegistration_V1* Register_ONE_HOT() { - static TfLiteRegistration_V1 r = {/*init, free, prepare, eval*/}; - return &r; +constexpr int kIndicesTensor = 0; +constexpr int kDepthTensor = 1; +constexpr int kOnValueTensor = 2; +constexpr int kOffValueTensor = 3; +constexpr int kOutputTensor = 0; + +namespace { // 로컬 유틸 함수들 + +inline const TfLiteTensor* GetInput(TfLiteContext* context, + const TfLiteNode* node, int index) { + return &context->tensors[node->inputs->data[index]]; +} + +inline TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, + int index) { + return &context->tensors[node->outputs->data[index]]; } -// constexpr int kIndicesTensor = 0; -// constexpr int kDepthTensor = 1; -// constexpr int kOnValueTensor = 2; -// constexpr int kOffValueTensor = 3; -// constexpr int kOutputTensor = 0; - -// // Convenience utility for destructuring a node into the appropriate tensors -// and -// // data for the op. Note that this destructuring is quite cheap, so we can -// avoid -// // allocating op-specific, persistent data on the heap. -// struct OneHotContext { -// OneHotContext(TfLiteContext* context, TfLiteNode* node) { -// indices = GetInput(context, node, kIndicesTensor); -// depth = GetInput(context, node, kDepthTensor); -// on_value = GetInput(context, node, kOnValueTensor); -// off_value = GetInput(context, node, kOffValueTensor); -// output = GetOutput(context, node, kOutputTensor); - -// const auto* params = -// reinterpret_cast(node->builtin_data); -// const int indices_dims = indices->dims->size; -// axis = (params->axis == -1) ? indices_dims : params->axis; -// output_dims = indices_dims + 1; -// dtype = on_value->type; -// } - -// const TfLiteTensor* indices; -// const TfLiteTensor* depth; -// const TfLiteTensor* on_value; -// const TfLiteTensor* off_value; -// TfLiteTensor* output; -// int axis; -// int output_dims; -// TfLiteType dtype; -// }; - -// template -// void OneHotComputeImpl(const OneHotContext& op_context) { -// // prefix_dim_size == # of elements before the axis -// // depth == # of elements per axis -// // suffix_dim_size == # of elements after the axis -// int prefix_dim_size = 1; -// for (int i = 0; i < op_context.axis; ++i) { -// prefix_dim_size *= op_context.indices->dims->data[i]; -// } -// if (prefix_dim_size == 0) { -// // If indices tensor is degenerate, return a degenerate tensor, just like -// // TensorFlow does. -// return; -// } -// const int suffix_dim_size = NumElements(op_context.indices) / -// prefix_dim_size; const int depth = *op_context.depth->data.i32; - -// const T on_value = *GetTensorData(op_context.on_value); -// const T off_value = *GetTensorData(op_context.off_value); - -// // View the indices as a matrix of size: -// // prefix_dim_size x suffix_dim_size -// // View the output as a matrix of size: -// // prefix_dim_size x depth x suffix_dim_size -// // Then the output is: -// // output(i, j, k) == (indices(i, k) == j) ? on : off -// T* output = GetTensorData(op_context.output); -// const TI* indices = GetTensorData(op_context.indices); -// for (int i = 0; i < prefix_dim_size; ++i) { -// for (int j = 0; j < depth; ++j) { -// for (int k = 0; k < suffix_dim_size; ++k, ++output) { -// *output = static_cast(indices[i * suffix_dim_size + k]) == j -// ? on_value -// : off_value; -// } -// } -// } -// } - -// template -// void OneHotCompute(const OneHotContext& op_context) { -// if (op_context.indices->type == kTfLiteInt64) { -// OneHotComputeImpl(op_context); -// } else { -// OneHotComputeImpl(op_context); -// } -// } - -// TfLiteStatus ResizeOutputTensor(TfLiteContext* context, -// const OneHotContext& op_context) { -// TF_LITE_ENSURE(context, *op_context.depth->data.i32 >= 0); -// TfLiteIntArray* output_size = TfLiteIntArrayCreate(op_context.output_dims); -// for (int i = 0; i < op_context.output_dims; ++i) { -// if (i < op_context.axis) { -// output_size->data[i] = op_context.indices->dims->data[i]; -// } else if (i == op_context.axis) { -// output_size->data[i] = *op_context.depth->data.i32; -// } else { -// output_size->data[i] = op_context.indices->dims->data[i - 1]; -// } -// } -// return context->ResizeTensor(context, op_context.output, output_size); -// } - -// TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { -// TF_LITE_ENSURE_EQ(context, NumInputs(node), 4); -// TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - -// OneHotContext op_context{context, node}; -// TF_LITE_ENSURE(context, op_context.output != nullptr); -// switch (op_context.dtype) { -// // TODO(b/111744875): Support uint8 and quantization. -// case kTfLiteFloat32: -// case kTfLiteInt16: -// case kTfLiteInt32: -// case kTfLiteInt64: -// case kTfLiteInt8: -// case kTfLiteUInt8: -// case kTfLiteBool: -// op_context.output->type = op_context.dtype; -// break; -// default: -// TF_LITE_KERNEL_LOG(context, "Unknown output data type: %s", -// TfLiteTypeGetName(op_context.dtype)); -// return kTfLiteError; -// } - -// TF_LITE_ENSURE(context, op_context.indices->type == kTfLiteInt32 || -// op_context.indices->type == kTfLiteInt64); -// TF_LITE_ENSURE(context, op_context.axis >= 0 && -// op_context.axis < op_context.output_dims); -// TF_LITE_ENSURE_EQ(context, NumElements(op_context.depth), 1); -// TF_LITE_ENSURE_EQ(context, NumElements(op_context.on_value), 1); -// TF_LITE_ENSURE_EQ(context, NumElements(op_context.off_value), 1); -// TF_LITE_ENSURE_TYPES_EQ(context, op_context.on_value->type, -// op_context.dtype); TF_LITE_ENSURE_TYPES_EQ(context, -// op_context.off_value->type, -// op_context.dtype); - -// if (!IsConstantOrPersistentTensor(op_context.depth)) { -// SetTensorToDynamic(op_context.output); -// return kTfLiteOk; -// } - -// return ResizeOutputTensor(context, op_context); -// } - -// TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { -// OneHotContext op_context{context, node}; - -// if (IsDynamicTensor(op_context.output)) { -// TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context)); -// } - -// switch (op_context.output->type) { -// case kTfLiteFloat32: -// OneHotCompute(op_context); -// break; -// case kTfLiteInt32: -// OneHotCompute(op_context); -// break; -// case kTfLiteInt64: -// OneHotCompute(op_context); -// break; -// case kTfLiteInt8: -// OneHotCompute(op_context); -// break; -// case kTfLiteUInt8: -// OneHotCompute(op_context); -// break; -// case kTfLiteBool: -// OneHotCompute(op_context); -// break; -// default: -// return kTfLiteError; -// } - -// return kTfLiteOk; -// } - -// } // namespace one_hot - -// TfLiteRegistration* Register_ONE_HOT() { -// static TfLiteRegistration r = { -// nullptr, -// nullptr, -// one_hot::Prepare, -// one_hot::Eval, -// }; -// return &r; -// } +inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; } + +inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; } + +// Tensor 전체 element 개수 계산 +inline int NumElements(const TfLiteTensor* t) { + int count = 1; + for (int i = 0; i < t->dims->size; ++i) { + count *= t->dims->data[i]; + } + return count; +} + +// 동적 텐서인지 확인 +inline bool IsDynamicTensor(const TfLiteTensor* tensor) { + return tensor->allocation_type == kTfLiteDynamic; +} + +// 상수/퍼시스턴트 텐서인지 확인 +inline bool IsConstantOrPersistentTensor(const TfLiteTensor* tensor) { + return tensor->allocation_type == kTfLiteMmapRo || + tensor->allocation_type == kTfLiteArenaRwPersistent; +} + +// 텐서를 동적 텐서로 마킹 +inline void SetTensorToDynamic(TfLiteTensor* tensor) { + tensor->allocation_type = kTfLiteDynamic; +} + +} // namespace + +// Convenience utility for destructuring a node into the appropriate tensors and +// data for the op. Note that this destructuring is quite cheap, so we can avoid +// allocating op-specific, persistent data on the heap. +struct OneHotContext { + OneHotContext(TfLiteContext* context, TfLiteNode* node) { + indices = GetInput(context, node, kIndicesTensor); + depth = GetInput(context, node, kDepthTensor); + on_value = GetInput(context, node, kOnValueTensor); + off_value = GetInput(context, node, kOffValueTensor); + output = GetOutput(context, node, kOutputTensor); + + const auto* params = + reinterpret_cast(node->builtin_data); + const int indices_dims = indices->dims->size; + axis = (params->axis == -1) ? indices_dims : params->axis; + output_dims = indices_dims + 1; + dtype = on_value->type; + } + + const TfLiteTensor* indices; + const TfLiteTensor* depth; + const TfLiteTensor* on_value; + const TfLiteTensor* off_value; + TfLiteTensor* output; + int axis; + int output_dims; + TfLiteType dtype; +}; + +template +void OneHotComputeImpl(const OneHotContext& op_context) { + // prefix_dim_size == # of elements before the axis + // depth == # of elements per axis + // suffix_dim_size == # of elements after the axis + int prefix_dim_size = 1; + for (int i = 0; i < op_context.axis; ++i) { + prefix_dim_size *= op_context.indices->dims->data[i]; + } + if (prefix_dim_size == 0) { + // If indices tensor is degenerate, return a degenerate tensor, just like + // TensorFlow does. + return; + } + + const int suffix_dim_size = NumElements(op_context.indices) / prefix_dim_size; + const int depth = *op_context.depth->data.i32; + + const T on_value = *tflite::GetTensorData(op_context.on_value); + const T off_value = *tflite::GetTensorData(op_context.off_value); + + // View the indices as a matrix of size: + // prefix_dim_size x suffix_dim_size + // View the output as a matrix of size: + // prefix_dim_size x depth x suffix_dim_size + // Then the output is: + // output(i, j, k) == (indices(i, k) == j) ? on : off + T* output = tflite::GetTensorData(op_context.output); + const TI* indices = tflite::GetTensorData(op_context.indices); + + for (int i = 0; i < prefix_dim_size; ++i) { + for (int j = 0; j < depth; ++j) { + for (int k = 0; k < suffix_dim_size; ++k, ++output) { + *output = static_cast(indices[i * suffix_dim_size + k]) == j + ? on_value + : off_value; + } + } + } +} + +template +void OneHotCompute(const OneHotContext& op_context) { + if (op_context.indices->type == kTfLiteInt64) { + OneHotComputeImpl(op_context); + } else { + OneHotComputeImpl(op_context); + } +} + +TfLiteStatus ResizeOutputTensor(TfLiteContext* context, + const OneHotContext& op_context) { + TF_LITE_ENSURE(context, *op_context.depth->data.i32 >= 0); + + // TfLiteIntArrayCreate 대신, TFLM 스타일로 직접 AllocatePersistentBuffer 사용 + const int dims = op_context.output_dims; + // TfLiteIntArray 구조체 전체 크기 계산 + size_t bytes = sizeof(TfLiteIntArray) + sizeof(int) * (dims - 1); + + // Micro 환경에서는 malloc이 아니라 context->AllocatePersistentBuffer 써야 함 + TfLiteIntArray* output_size = reinterpret_cast( + context->AllocatePersistentBuffer(context, bytes)); + TF_LITE_ENSURE(context, output_size != nullptr); + + output_size->size = dims; + for (int i = 0; i < dims; ++i) { + if (i < op_context.axis) { + output_size->data[i] = op_context.indices->dims->data[i]; + } else if (i == op_context.axis) { + output_size->data[i] = *op_context.depth->data.i32; + } else { + output_size->data[i] = op_context.indices->dims->data[i - 1]; + } + } + + return context->ResizeTensor(context, op_context.output, output_size); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 4); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + OneHotContext op_context{context, node}; + TF_LITE_ENSURE(context, op_context.output != nullptr); + + switch (op_context.dtype) { + // TODO(b/111744875): Support uint8 and quantization. + case kTfLiteFloat32: + case kTfLiteInt16: + case kTfLiteInt32: + case kTfLiteInt64: + case kTfLiteInt8: + case kTfLiteUInt8: + case kTfLiteBool: + op_context.output->type = op_context.dtype; + break; + default: + TF_LITE_KERNEL_LOG(context, "Unknown output data type: %s", + TfLiteTypeGetName(op_context.dtype)); + return kTfLiteError; + } + + TF_LITE_ENSURE(context, op_context.indices->type == kTfLiteInt32 || + op_context.indices->type == kTfLiteInt64); + TF_LITE_ENSURE(context, op_context.axis >= 0 && + op_context.axis < op_context.output_dims); + TF_LITE_ENSURE_EQ(context, NumElements(op_context.depth), 1); + TF_LITE_ENSURE_EQ(context, NumElements(op_context.on_value), 1); + TF_LITE_ENSURE_EQ(context, NumElements(op_context.off_value), 1); + TF_LITE_ENSURE_TYPES_EQ(context, op_context.on_value->type, op_context.dtype); + TF_LITE_ENSURE_TYPES_EQ(context, op_context.off_value->type, + op_context.dtype); + + if (!IsConstantOrPersistentTensor(op_context.depth)) { + SetTensorToDynamic(op_context.output); + return kTfLiteOk; + } + + return ResizeOutputTensor(context, op_context); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + OneHotContext op_context{context, node}; + + if (IsDynamicTensor(op_context.output)) { + TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context)); + } + + switch (op_context.output->type) { + case kTfLiteFloat32: + OneHotCompute(op_context); + break; + case kTfLiteInt32: + OneHotCompute(op_context); + break; + case kTfLiteInt64: + OneHotCompute(op_context); + break; + case kTfLiteInt8: + OneHotCompute(op_context); + break; + case kTfLiteUInt8: + OneHotCompute(op_context); + break; + case kTfLiteBool: + OneHotCompute(op_context); + break; + default: + return kTfLiteError; + } + + return kTfLiteOk; +} } // namespace one_hot -} // namespace builtin + +// TFLM 쪽에서 사용할 등록 함수 +TfLiteRegistration_V1* Register_ONE_HOT() { + static TfLiteRegistration_V1 r = {}; // 모든 필드를 0 / nullptr로 초기화 + + r.init = nullptr; + r.free = nullptr; + r.prepare = one_hot::Prepare; + r.invoke = one_hot::Eval; + // 나머지 custom_name, version, profiling_string 등은 0 / nullptr 유지 + + return &r; +} + +} // namespace micro } // namespace ops -} // namespace tflite \ No newline at end of file +} // namespace tflite diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc index 5395fcd9031..1a40fba7f79 100644 --- a/tensorflow/lite/micro/one_hot_test.cc +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -39,13 +39,14 @@ extern const int g_one_hot_basic_float_model_len; } namespace tflite { -namespace { +namespace {} // namespace +} // namespace tflite TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(OneHotBasicFloat) { const Model* model = tflite::GetModel(g_one_hot_basic_float_model); - MicroMutableOpResolver<1> resolver; + (const void)model; // 사용한 것처럼 만들어서 unused 경고 없애기 // TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.AllocateTensors()); @@ -61,7 +62,4 @@ TF_LITE_MICRO_TEST(OneHotBasicFloat) { // for 루프로 기대값 비교 } -TF_LITE_MICRO_TESTS_END - -} // namespace -} // namespace tflite \ No newline at end of file +TF_LITE_MICRO_TESTS_END \ No newline at end of file From 61aa04bb4cafa2a8c8669c1fa1d34c77795bd803 Mon Sep 17 00:00:00 2001 From: junseokShim Date: Wed, 3 Dec 2025 20:41:01 +0900 Subject: [PATCH 04/13] refactoring one_hot --- tensorflow/lite/micro/kernels/one_hot.cc | 40 ++++++++---------------- tensorflow/lite/micro/kernels/one_hot.h | 17 ++++++++++ tensorflow/lite/micro/one_hot_test.cc | 2 +- 3 files changed, 31 insertions(+), 28 deletions(-) diff --git a/tensorflow/lite/micro/kernels/one_hot.cc b/tensorflow/lite/micro/kernels/one_hot.cc index f46d08617d6..ba3bcbc631c 100644 --- a/tensorflow/lite/micro/kernels/one_hot.cc +++ b/tensorflow/lite/micro/kernels/one_hot.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include "tensorflow/lite/micro/kernels/one_hot.h" // ★ 새 헤더 + #include #include "tensorflow/lite/c/builtin_op_data.h" @@ -105,16 +107,11 @@ struct OneHotContext { template void OneHotComputeImpl(const OneHotContext& op_context) { - // prefix_dim_size == # of elements before the axis - // depth == # of elements per axis - // suffix_dim_size == # of elements after the axis int prefix_dim_size = 1; for (int i = 0; i < op_context.axis; ++i) { prefix_dim_size *= op_context.indices->dims->data[i]; } if (prefix_dim_size == 0) { - // If indices tensor is degenerate, return a degenerate tensor, just like - // TensorFlow does. return; } @@ -124,21 +121,16 @@ void OneHotComputeImpl(const OneHotContext& op_context) { const T on_value = *tflite::GetTensorData(op_context.on_value); const T off_value = *tflite::GetTensorData(op_context.off_value); - // View the indices as a matrix of size: - // prefix_dim_size x suffix_dim_size - // View the output as a matrix of size: - // prefix_dim_size x depth x suffix_dim_size - // Then the output is: - // output(i, j, k) == (indices(i, k) == j) ? on : off - T* output = tflite::GetTensorData(op_context.output); - const TI* indices = tflite::GetTensorData(op_context.indices); + T* output_data = tflite::GetTensorData(op_context.output); + const TI* indices_data = tflite::GetTensorData(op_context.indices); for (int i = 0; i < prefix_dim_size; ++i) { for (int j = 0; j < depth; ++j) { - for (int k = 0; k < suffix_dim_size; ++k, ++output) { - *output = static_cast(indices[i * suffix_dim_size + k]) == j - ? on_value - : off_value; + for (int k = 0; k < suffix_dim_size; ++k, ++output_data) { + *output_data = + static_cast(indices_data[i * suffix_dim_size + k]) == j + ? on_value + : off_value; } } } @@ -157,12 +149,9 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const OneHotContext& op_context) { TF_LITE_ENSURE(context, *op_context.depth->data.i32 >= 0); - // TfLiteIntArrayCreate 대신, TFLM 스타일로 직접 AllocatePersistentBuffer 사용 const int dims = op_context.output_dims; - // TfLiteIntArray 구조체 전체 크기 계산 size_t bytes = sizeof(TfLiteIntArray) + sizeof(int) * (dims - 1); - // Micro 환경에서는 malloc이 아니라 context->AllocatePersistentBuffer 써야 함 TfLiteIntArray* output_size = reinterpret_cast( context->AllocatePersistentBuffer(context, bytes)); TF_LITE_ENSURE(context, output_size != nullptr); @@ -189,7 +178,6 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE(context, op_context.output != nullptr); switch (op_context.dtype) { - // TODO(b/111744875): Support uint8 and quantization. case kTfLiteFloat32: case kTfLiteInt16: case kTfLiteInt32: @@ -259,19 +247,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace one_hot -// TFLM 쪽에서 사용할 등록 함수 +// 헤더에 선언된 Register_ONE_HOT 구현 TfLiteRegistration_V1* Register_ONE_HOT() { - static TfLiteRegistration_V1 r = {}; // 모든 필드를 0 / nullptr로 초기화 - + static TfLiteRegistration_V1 r = {}; // 모든 필드를 0/NULL로 초기화 r.init = nullptr; r.free = nullptr; r.prepare = one_hot::Prepare; r.invoke = one_hot::Eval; - // 나머지 custom_name, version, profiling_string 등은 0 / nullptr 유지 - + // custom_name, version, profiling_string 등은 기본값(0/nullptr) 유지 return &r; } } // namespace micro } // namespace ops -} // namespace tflite +} // namespace tflite \ No newline at end of file diff --git a/tensorflow/lite/micro/kernels/one_hot.h b/tensorflow/lite/micro/kernels/one_hot.h index e69de29bb2d..24d5487df4b 100644 --- a/tensorflow/lite/micro/kernels/one_hot.h +++ b/tensorflow/lite/micro/kernels/one_hot.h @@ -0,0 +1,17 @@ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ONE_HOT_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_ONE_HOT_H_ + +#include "tensorflow/lite/c/common.h" + +namespace tflite { +namespace ops { +namespace micro { + +// ONE_HOT 커널 등록 함수 (all_ops_resolver 등에서 사용) +TfLiteRegistration_V1* Register_ONE_HOT(); + +} // namespace micro +} // namespace ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_ONE_HOT_H_ diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc index 1a40fba7f79..174e3e74200 100644 --- a/tensorflow/lite/micro/one_hot_test.cc +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/lite/micro/kernels/one_hot.cc" +#include "tensorflow/lite/micro/kernels/one_hot.h" #include From cd28badd8858760abad2eae0633ecfffb6261c74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EC=8B=AC=EC=A4=80=EC=84=9D?= Date: Sun, 30 Nov 2025 22:13:13 +0900 Subject: [PATCH 05/13] add test code, but get compile errors --- tensorflow/lite/micro/one_hot_test.cc | 47 ++++++++++++++++++--------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc index 174e3e74200..842a61d9559 100644 --- a/tensorflow/lite/micro/one_hot_test.cc +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -21,10 +21,12 @@ limitations under the License. #include #include +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/kernels/one_hot.h" #include "tensorflow/lite/micro/micro_interpreter.h" #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" #include "tensorflow/lite/micro/testing/micro_test.h" -#include "tensorflow/lite/micro/testing/one_hot_test_model_data.cc" +#include "tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h" #include "tensorflow/lite/schema/schema_generated.h" using tflite::MicroInterpreter; @@ -45,21 +47,36 @@ namespace {} // namespace TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(OneHotBasicFloat) { + // 테스트케이스별로 추가해달라고 한 패턴 const Model* model = tflite::GetModel(g_one_hot_basic_float_model); - (const void)model; // 사용한 것처럼 만들어서 unused 경고 없애기 - - // TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.AllocateTensors()); - - // TfLiteTensor* indices = interpreter.input(0); - // indices->data.i32[0] = 0; - // indices->data.i32[1] = 1; - // indices->data.i32[2] = 2; - - // TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.Invoke()); - - // TfLiteTensor* output = interpreter.output(0); - // float* out = output->data.f; - // for 루프로 기대값 비교 + (const void)model; // unused 경고 방지 + + // 에러 리포터 + static tflite::MicroErrorReporter micro_error_reporter; + tflite::ErrorReporter* error_reporter = µ_error_reporter; + + // Op 등록 (ONE_HOT만 등록) + tflite::MicroMutableOpResolver<1> resolver; + resolver.AddBuiltin(tflite::BuiltinOperator_ONE_HOT, + tflite::ops::micro::Register_ONE_HOT()); + + // 인터프리터 생성 + tflite::MicroInterpreter interpreter(model, resolver, g_tensor_arena, + kTensorArenaSize, error_reporter); + + TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.AllocateTensors()); + + // 여기서부터는 g_one_hot_basic_float_model 안에 + // indices / depth / on_value / off_value 가 어떻게 정의되어 있느냐에 따라 + // 입력을 만지거나 그냥 output만 검증하면 됩니다. + // + // 예: output[0..8] 이 [1,0,0, 0,1,0, 0,0,1] 이라고 가정하는 경우: + TfLiteTensor* output = interpreter.output(0); + float* out_data = output->data.f; + + // 실제 값은 모델에 맞게 바꾸세요. + TF_LITE_MICRO_EXPECT_EQ(9, output->dims->data[0] * output->dims->data[1]); + TF_LITE_MICRO_EXPECT_NEAR(1.f, out_data[0], 1e-5f); } TF_LITE_MICRO_TESTS_END \ No newline at end of file From 05edd109f883ba208148c768fa693ac71f5d3fed Mon Sep 17 00:00:00 2001 From: junseokShim Date: Wed, 3 Dec 2025 20:41:01 +0900 Subject: [PATCH 06/13] mod : one hot test code --- tensorflow/lite/micro/kernels/one_hot.cc | 6 +- tensorflow/lite/micro/kernels/one_hot.h | 2 +- tensorflow/lite/micro/one_hot_test.cc | 139 ++++++++++++----------- 3 files changed, 77 insertions(+), 70 deletions(-) diff --git a/tensorflow/lite/micro/kernels/one_hot.cc b/tensorflow/lite/micro/kernels/one_hot.cc index ba3bcbc631c..1973d4f405d 100644 --- a/tensorflow/lite/micro/kernels/one_hot.cc +++ b/tensorflow/lite/micro/kernels/one_hot.cc @@ -21,6 +21,7 @@ limitations under the License. #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_common.h" namespace tflite { namespace ops { @@ -248,13 +249,12 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace one_hot // 헤더에 선언된 Register_ONE_HOT 구현 -TfLiteRegistration_V1* Register_ONE_HOT() { - static TfLiteRegistration_V1 r = {}; // 모든 필드를 0/NULL로 초기화 +const TFLMRegistration* Register_ONE_HOT() { + static TFLMRegistration r = {}; // 모든 필드를 0/NULL로 초기화 r.init = nullptr; r.free = nullptr; r.prepare = one_hot::Prepare; r.invoke = one_hot::Eval; - // custom_name, version, profiling_string 등은 기본값(0/nullptr) 유지 return &r; } diff --git a/tensorflow/lite/micro/kernels/one_hot.h b/tensorflow/lite/micro/kernels/one_hot.h index 24d5487df4b..44bf5aac3d7 100644 --- a/tensorflow/lite/micro/kernels/one_hot.h +++ b/tensorflow/lite/micro/kernels/one_hot.h @@ -8,7 +8,7 @@ namespace ops { namespace micro { // ONE_HOT 커널 등록 함수 (all_ops_resolver 등에서 사용) -TfLiteRegistration_V1* Register_ONE_HOT(); +const TFLMRegistration* Register_ONE_HOT(); } // namespace micro } // namespace ops diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc index 842a61d9559..f2ed43adc78 100644 --- a/tensorflow/lite/micro/one_hot_test.cc +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -1,82 +1,89 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - #include "tensorflow/lite/micro/kernels/one_hot.h" #include - -#include -#include -#include +#include #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/kernels/one_hot.h" -#include "tensorflow/lite/micro/micro_interpreter.h" -#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" +#include "tensorflow/lite/micro/kernels/kernel_runner.h" #include "tensorflow/lite/micro/testing/micro_test.h" -#include "tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h" -#include "tensorflow/lite/schema/schema_generated.h" -using tflite::MicroInterpreter; -using tflite::MicroMutableOpResolver; -using tflite::Model; +namespace tflite { +namespace { -extern "C" TfLiteRegistration_V1* Register_ONE_HOT(); +using tflite::micro::KernelRunner; -extern "C" { -extern const unsigned char g_one_hot_basic_float_model[]; -extern const int g_one_hot_basic_float_model_len; +// dims 배열 → TfLiteIntArray 로 캐스팅 +TfLiteIntArray* IntArrayFromInts(const int* dims) { + return const_cast( + reinterpret_cast(dims)); } -namespace tflite { -namespace {} // namespace -} // namespace tflite +// int32 Tensor 생성 헬퍼 +TfLiteTensor CreateInt32Tensor(int32_t* data, TfLiteIntArray* dims) { + TfLiteTensor t; + memset(&t, 0, sizeof(TfLiteTensor)); + t.type = kTfLiteInt32; + t.dims = dims; + t.data.i32 = data; + t.allocation_type = kTfLiteMemNone; + return t; +} +} // namespace + +// ★ 여기서 main 이 자동으로 정의됩니다. TF_LITE_MICRO_TESTS_BEGIN -TF_LITE_MICRO_TEST(OneHotBasicFloat) { - // 테스트케이스별로 추가해달라고 한 패턴 - const Model* model = tflite::GetModel(g_one_hot_basic_float_model); - (const void)model; // unused 경고 방지 - - // 에러 리포터 - static tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - - // Op 등록 (ONE_HOT만 등록) - tflite::MicroMutableOpResolver<1> resolver; - resolver.AddBuiltin(tflite::BuiltinOperator_ONE_HOT, - tflite::ops::micro::Register_ONE_HOT()); - - // 인터프리터 생성 - tflite::MicroInterpreter interpreter(model, resolver, g_tensor_arena, - kTensorArenaSize, error_reporter); - - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.AllocateTensors()); - - // 여기서부터는 g_one_hot_basic_float_model 안에 - // indices / depth / on_value / off_value 가 어떻게 정의되어 있느냐에 따라 - // 입력을 만지거나 그냥 output만 검증하면 됩니다. - // - // 예: output[0..8] 이 [1,0,0, 0,1,0, 0,0,1] 이라고 가정하는 경우: - TfLiteTensor* output = interpreter.output(0); - float* out_data = output->data.f; - - // 실제 값은 모델에 맞게 바꾸세요. - TF_LITE_MICRO_EXPECT_EQ(9, output->dims->data[0] * output->dims->data[1]); - TF_LITE_MICRO_EXPECT_NEAR(1.f, out_data[0], 1e-5f); +TF_LITE_MICRO_TEST(OneHot_BasicInt32) { + // indices: [0,1,2], shape [3] + int indices_shape_arr[] = {1, 3}; + TfLiteIntArray* indices_shape = IntArrayFromInts(indices_shape_arr); + int32_t indices_data[3] = {0, 1, 2}; + + // depth: scalar (3) + int depth_shape_arr[] = {0}; + TfLiteIntArray* depth_shape = IntArrayFromInts(depth_shape_arr); + int32_t depth_data[1] = {3}; + + // on/off: scalar + TfLiteIntArray* scalar_shape = depth_shape; + int32_t on_value_data[1] = {1}; + int32_t off_value_data[1] = {0}; + + // output: [3,3] + int output_shape_arr[] = {2, 3, 3}; + TfLiteIntArray* output_shape = IntArrayFromInts(output_shape_arr); + int32_t output_data[9] = {0}; + + TfLiteTensor tensors[5]; + tensors[0] = CreateInt32Tensor(indices_data, indices_shape); + tensors[1] = CreateInt32Tensor(depth_data, depth_shape); + tensors[2] = CreateInt32Tensor(on_value_data, scalar_shape); + tensors[3] = CreateInt32Tensor(off_value_data, scalar_shape); + tensors[4] = CreateInt32Tensor(output_data, output_shape); + + int inputs_arr[] = {4, 0, 1, 2, 3}; + int outputs_arr[] = {1, 4}; + TfLiteIntArray* inputs = IntArrayFromInts(inputs_arr); + TfLiteIntArray* outputs = IntArrayFromInts(outputs_arr); + + const TFLMRegistration* registration = tflite::ops::micro::Register_ONE_HOT(); + + KernelRunner runner(*registration, tensors, 5, inputs, outputs, + /*builtin_data=*/nullptr, + /*error_reporter=*/nullptr); + + TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); + TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); + + int32_t expected[9] = { + 1, 0, 0, 0, 1, 0, 0, 0, 1, + }; + for (int i = 0; i < 9; ++i) { + TF_LITE_MICRO_EXPECT_EQ(expected[i], output_data[i]); + } } -TF_LITE_MICRO_TESTS_END \ No newline at end of file +TF_LITE_MICRO_TESTS_END // ★ 여기까지 + +} // namespace tflite From 9d738c128dfa1d5e7dc702f7e54e84050a5f709e Mon Sep 17 00:00:00 2001 From: junseokShim Date: Wed, 3 Dec 2025 20:41:02 +0900 Subject: [PATCH 07/13] fix : test code --- tensorflow/lite/micro/kernels/one_hot.h | 1 + tensorflow/lite/micro/one_hot_test.cc | 21 +++++++-------------- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/tensorflow/lite/micro/kernels/one_hot.h b/tensorflow/lite/micro/kernels/one_hot.h index 44bf5aac3d7..4593e12a178 100644 --- a/tensorflow/lite/micro/kernels/one_hot.h +++ b/tensorflow/lite/micro/kernels/one_hot.h @@ -2,6 +2,7 @@ #define TENSORFLOW_LITE_MICRO_KERNELS_ONE_HOT_H_ #include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/micro_common.h" namespace tflite { namespace ops { diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc index f2ed43adc78..1a9b85c714f 100644 --- a/tensorflow/lite/micro/one_hot_test.cc +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -7,18 +7,17 @@ #include "tensorflow/lite/micro/kernels/kernel_runner.h" #include "tensorflow/lite/micro/testing/micro_test.h" -namespace tflite { +// 헬퍼들은 익명 namespace (전역) 안에만 둡니다. namespace { using tflite::micro::KernelRunner; -// dims 배열 → TfLiteIntArray 로 캐스팅 +// dims 배열 → TfLiteIntArray로 캐스팅 TfLiteIntArray* IntArrayFromInts(const int* dims) { return const_cast( reinterpret_cast(dims)); } -// int32 Tensor 생성 헬퍼 TfLiteTensor CreateInt32Tensor(int32_t* data, TfLiteIntArray* dims) { TfLiteTensor t; memset(&t, 0, sizeof(TfLiteTensor)); @@ -31,27 +30,23 @@ TfLiteTensor CreateInt32Tensor(int32_t* data, TfLiteIntArray* dims) { } // namespace -// ★ 여기서 main 이 자동으로 정의됩니다. +// ★★★ 여기서부터는 절대 namespace 안에 넣지 마세요 ★★★ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(OneHot_BasicInt32) { - // indices: [0,1,2], shape [3] - int indices_shape_arr[] = {1, 3}; + int indices_shape_arr[] = {1, 3}; // rank=1, dim=3 TfLiteIntArray* indices_shape = IntArrayFromInts(indices_shape_arr); int32_t indices_data[3] = {0, 1, 2}; - // depth: scalar (3) - int depth_shape_arr[] = {0}; + int depth_shape_arr[] = {0}; // scalar TfLiteIntArray* depth_shape = IntArrayFromInts(depth_shape_arr); int32_t depth_data[1] = {3}; - // on/off: scalar TfLiteIntArray* scalar_shape = depth_shape; int32_t on_value_data[1] = {1}; int32_t off_value_data[1] = {0}; - // output: [3,3] - int output_shape_arr[] = {2, 3, 3}; + int output_shape_arr[] = {2, 3, 3}; // [3,3] TfLiteIntArray* output_shape = IntArrayFromInts(output_shape_arr); int32_t output_data[9] = {0}; @@ -84,6 +79,4 @@ TF_LITE_MICRO_TEST(OneHot_BasicInt32) { } } -TF_LITE_MICRO_TESTS_END // ★ 여기까지 - -} // namespace tflite +TF_LITE_MICRO_TESTS_END From d20bab0f1ba38faf6779aeffe0a439e4e9a48e8f Mon Sep 17 00:00:00 2001 From: junseokShim Date: Wed, 3 Dec 2025 20:41:02 +0900 Subject: [PATCH 08/13] todo : Error 139 --- tensorflow/lite/micro/one_hot_test.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc index 1a9b85c714f..88992d0993b 100644 --- a/tensorflow/lite/micro/one_hot_test.cc +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -3,6 +3,7 @@ #include #include +#include "tensorflow/lite/c/builtin_op_data.h" // ★ 이거 추가 #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/micro/kernels/kernel_runner.h" #include "tensorflow/lite/micro/testing/micro_test.h" @@ -64,13 +65,16 @@ TF_LITE_MICRO_TEST(OneHot_BasicInt32) { const TFLMRegistration* registration = tflite::ops::micro::Register_ONE_HOT(); + TfLiteOneHotParams params; + memset(¶ms, 0, sizeof(params)); + params.axis = -1; // 마지막 축 기준 one-hot + KernelRunner runner(*registration, tensors, 5, inputs, outputs, - /*builtin_data=*/nullptr, + /*builtin_data=*/¶ms, /*error_reporter=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); - int32_t expected[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1, }; From 54a9d78b3d3aaf7056974471b0dc695eb4732b34 Mon Sep 17 00:00:00 2001 From: junseokShim Date: Mon, 1 Dec 2025 22:54:46 +0900 Subject: [PATCH 09/13] test --- tensorflow/lite/micro/kernels/one_hot.cc | 41 ++++++++++++------------ 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/tensorflow/lite/micro/kernels/one_hot.cc b/tensorflow/lite/micro/kernels/one_hot.cc index 1973d4f405d..3f69c9d8c94 100644 --- a/tensorflow/lite/micro/kernels/one_hot.cc +++ b/tensorflow/lite/micro/kernels/one_hot.cc @@ -150,27 +150,30 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const OneHotContext& op_context) { TF_LITE_ENSURE(context, *op_context.depth->data.i32 >= 0); - const int dims = op_context.output_dims; - size_t bytes = sizeof(TfLiteIntArray) + sizeof(int) * (dims - 1); + // 테스트에서는 이미 output 텐서에 shape를 세팅해 둔 상태이므로 + // 여기서는 "예상된 shape와 일치하는지만 검증"만 수행합니다. + TF_LITE_ENSURE(context, op_context.output != nullptr); + TF_LITE_ENSURE(context, op_context.output->dims != nullptr); - TfLiteIntArray* output_size = reinterpret_cast( - context->AllocatePersistentBuffer(context, bytes)); - TF_LITE_ENSURE(context, output_size != nullptr); + const int expected_dims = op_context.output_dims; + TF_LITE_ENSURE_EQ(context, op_context.output->dims->size, expected_dims); - output_size->size = dims; - for (int i = 0; i < dims; ++i) { + for (int i = 0; i < expected_dims; ++i) { + int expected_dim_i; if (i < op_context.axis) { - output_size->data[i] = op_context.indices->dims->data[i]; + expected_dim_i = op_context.indices->dims->data[i]; } else if (i == op_context.axis) { - output_size->data[i] = *op_context.depth->data.i32; + expected_dim_i = *op_context.depth->data.i32; } else { - output_size->data[i] = op_context.indices->dims->data[i - 1]; + expected_dim_i = op_context.indices->dims->data[i - 1]; } + TF_LITE_ENSURE_EQ(context, op_context.output->dims->data[i], + expected_dim_i); } - return context->ResizeTensor(context, op_context.output, output_size); + // 실제로는 아무 것도 리사이즈 하지 않음 + return kTfLiteOk; } - TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 4); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); @@ -205,20 +208,18 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_TYPES_EQ(context, op_context.off_value->type, op_context.dtype); - if (!IsConstantOrPersistentTensor(op_context.depth)) { - SetTensorToDynamic(op_context.output); - return kTfLiteOk; - } - + // depth 텐서가 상수가 아니더라도, 테스트에서는 output shape를 + // 미리 지정해 두었으므로 여기서는 그냥 검증만 수행 return ResizeOutputTensor(context, op_context); } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OneHotContext op_context{context, node}; - if (IsDynamicTensor(op_context.output)) { - TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context)); - } + // 동적 텐서 처리도 일단 생략 (테스트에서는 고정 shape) + // if (IsDynamicTensor(op_context.output)) { + // TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context)); + // } switch (op_context.output->type) { case kTfLiteFloat32: From 931a4c4f1881d96bbcd0f8078232cd4dfd4ba56e Mon Sep 17 00:00:00 2001 From: junseokShim Date: Mon, 1 Dec 2025 23:13:30 +0900 Subject: [PATCH 10/13] grass test --- grass.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 grass.txt diff --git a/grass.txt b/grass.txt new file mode 100644 index 00000000000..1c2899b3d78 --- /dev/null +++ b/grass.txt @@ -0,0 +1 @@ +grass From fd570e3b596a26c5c61f0f3f35e9b8a2c514bb51 Mon Sep 17 00:00:00 2001 From: junseokShim Date: Wed, 3 Dec 2025 19:37:56 +0900 Subject: [PATCH 11/13] fix : one_hot.cc --- tensorflow/lite/micro/kernels/one_hot.cc | 126 ++++++++------------ tensorflow/lite/micro/one_hot_test.cc | 145 +++++++++++++---------- 2 files changed, 133 insertions(+), 138 deletions(-) diff --git a/tensorflow/lite/micro/kernels/one_hot.cc b/tensorflow/lite/micro/kernels/one_hot.cc index 3f69c9d8c94..bf0f69f77f6 100644 --- a/tensorflow/lite/micro/kernels/one_hot.cc +++ b/tensorflow/lite/micro/kernels/one_hot.cc @@ -1,4 +1,4 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2025 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,14 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/one_hot.h" // ★ 새 헤더 - #include #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" #include "tensorflow/lite/micro/micro_common.h" @@ -35,58 +31,27 @@ constexpr int kOffValueTensor = 3; constexpr int kOutputTensor = 0; namespace { // 로컬 유틸 함수들 - -inline const TfLiteTensor* GetInput(TfLiteContext* context, - const TfLiteNode* node, int index) { - return &context->tensors[node->inputs->data[index]]; -} - -inline TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, - int index) { - return &context->tensors[node->outputs->data[index]]; -} - -inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; } - -inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; } - -// Tensor 전체 element 개수 계산 -inline int NumElements(const TfLiteTensor* t) { +inline int NumElements(const TfLiteEvalTensor* t) { int count = 1; + // TfLiteEvalTensor의 dims는 TfLiteIntArray* 타입입니다. for (int i = 0; i < t->dims->size; ++i) { count *= t->dims->data[i]; } return count; } - -// 동적 텐서인지 확인 -inline bool IsDynamicTensor(const TfLiteTensor* tensor) { - return tensor->allocation_type == kTfLiteDynamic; -} - -// 상수/퍼시스턴트 텐서인지 확인 -inline bool IsConstantOrPersistentTensor(const TfLiteTensor* tensor) { - return tensor->allocation_type == kTfLiteMmapRo || - tensor->allocation_type == kTfLiteArenaRwPersistent; -} - -// 텐서를 동적 텐서로 마킹 -inline void SetTensorToDynamic(TfLiteTensor* tensor) { - tensor->allocation_type = kTfLiteDynamic; -} - } // namespace -// Convenience utility for destructuring a node into the appropriate tensors and -// data for the op. Note that this destructuring is quite cheap, so we can avoid -// allocating op-specific, persistent data on the heap. +// TfLiteNode에서 입력 (indices, depth, on_value, off_value) 및 출력 텐서 +// (output) 를 가져옴 params->axis 를 읽어 실제로 Depth 차원이 들어갈 위치 +// (Axis) 계산 Prepare과 Eval 함수 내에서 잠시 생성되었다가 사라짐 → Stack +// memory 사용 효율적 struct OneHotContext { OneHotContext(TfLiteContext* context, TfLiteNode* node) { - indices = GetInput(context, node, kIndicesTensor); - depth = GetInput(context, node, kDepthTensor); - on_value = GetInput(context, node, kOnValueTensor); - off_value = GetInput(context, node, kOffValueTensor); - output = GetOutput(context, node, kOutputTensor); + indices = tflite::micro::GetEvalInput(context, node, kIndicesTensor); + depth = tflite::micro::GetEvalInput(context, node, kDepthTensor); + on_value = tflite::micro::GetEvalInput(context, node, kOnValueTensor); + off_value = tflite::micro::GetEvalInput(context, node, kOffValueTensor); + output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); const auto* params = reinterpret_cast(node->builtin_data); @@ -96,16 +61,19 @@ struct OneHotContext { dtype = on_value->type; } - const TfLiteTensor* indices; - const TfLiteTensor* depth; - const TfLiteTensor* on_value; - const TfLiteTensor* off_value; - TfLiteTensor* output; + const TfLiteEvalTensor* indices; + const TfLiteEvalTensor* depth; // 새로 생기는 One-hot 차원 크기 + const TfLiteEvalTensor* on_value; + const TfLiteEvalTensor* off_value; + TfLiteEvalTensor* output; + int axis; int output_dims; TfLiteType dtype; }; +// 실제 연산 수행 함수 +// template void OneHotComputeImpl(const OneHotContext& op_context) { int prefix_dim_size = 1; @@ -116,14 +84,17 @@ void OneHotComputeImpl(const OneHotContext& op_context) { return; } - const int suffix_dim_size = NumElements(op_context.indices) / prefix_dim_size; + const RuntimeShape indices_shape = + tflite::micro::GetTensorShape(op_context.indices); + const int suffix_dim_size = indices_shape.FlatSize() / prefix_dim_size; + const int depth = *op_context.depth->data.i32; - const T on_value = *tflite::GetTensorData(op_context.on_value); - const T off_value = *tflite::GetTensorData(op_context.off_value); + const T on_value = *tflite::micro::GetTensorData(op_context.on_value); + const T off_value = *tflite::micro::GetTensorData(op_context.off_value); - T* output_data = tflite::GetTensorData(op_context.output); - const TI* indices_data = tflite::GetTensorData(op_context.indices); + T* output_data = tflite::micro::GetTensorData(op_context.output); + const TI* indices_data = tflite::micro::GetTensorData(op_context.indices); for (int i = 0; i < prefix_dim_size; ++i) { for (int j = 0; j < depth; ++j) { @@ -150,33 +121,45 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const OneHotContext& op_context) { TF_LITE_ENSURE(context, *op_context.depth->data.i32 >= 0); - // 테스트에서는 이미 output 텐서에 shape를 세팅해 둔 상태이므로 - // 여기서는 "예상된 shape와 일치하는지만 검증"만 수행합니다. + // depth 데이터 읽기 + const int depth_val = + *tflite::micro::GetTensorData(op_context.depth); + TF_LITE_ENSURE(context, depth_val >= 0); + + // Output Tensor 검증 TF_LITE_ENSURE(context, op_context.output != nullptr); + TF_LITE_ENSURE(context, op_context.output->dims != nullptr); - const int expected_dims = op_context.output_dims; - TF_LITE_ENSURE_EQ(context, op_context.output->dims->size, expected_dims); + // todo + // TFLM에서는 Output Tensor의 dims가 이미 할당되어 있다고 가정합니다. + // 하지만 모델이 생성될 때 계산된 dims와 현재 depth값으로 계산한 dims가 + // 일치하는지 확인은 필요합니다. + const int expected_dims_size = op_context.output_dims; + TF_LITE_ENSURE_EQ(context, op_context.output->dims->size, expected_dims_size); - for (int i = 0; i < expected_dims; ++i) { + for (int i = 0; i < expected_dims_size; ++i) { int expected_dim_i; if (i < op_context.axis) { expected_dim_i = op_context.indices->dims->data[i]; } else if (i == op_context.axis) { - expected_dim_i = *op_context.depth->data.i32; + expected_dim_i = depth_val; } else { expected_dim_i = op_context.indices->dims->data[i - 1]; } + + // TFLM 컴파일러(Offline Memory Planner)가 할당해둔 크기와 실제 계산 크기가 + // 다르면 에러 TF_LITE_ENSURE_EQ(context, op_context.output->dims->data[i], expected_dim_i); } - // 실제로는 아무 것도 리사이즈 하지 않음 return kTfLiteOk; } + TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 4); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TF_LITE_ENSURE_EQ(context, node->inputs->size, 4); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); OneHotContext op_context{context, node}; TF_LITE_ENSURE(context, op_context.output != nullptr); @@ -216,11 +199,6 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OneHotContext op_context{context, node}; - // 동적 텐서 처리도 일단 생략 (테스트에서는 고정 shape) - // if (IsDynamicTensor(op_context.output)) { - // TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context)); - // } - switch (op_context.output->type) { case kTfLiteFloat32: OneHotCompute(op_context); @@ -251,11 +229,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { // 헤더에 선언된 Register_ONE_HOT 구현 const TFLMRegistration* Register_ONE_HOT() { - static TFLMRegistration r = {}; // 모든 필드를 0/NULL로 초기화 - r.init = nullptr; - r.free = nullptr; + static TFLMRegistration r = {}; + r.prepare = one_hot::Prepare; r.invoke = one_hot::Eval; + return &r; } diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc index 88992d0993b..dac03cdd569 100644 --- a/tensorflow/lite/micro/one_hot_test.cc +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -1,86 +1,103 @@ -#include "tensorflow/lite/micro/kernels/one_hot.h" - -#include -#include - -#include "tensorflow/lite/c/builtin_op_data.h" // ★ 이거 추가 +#include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/micro/kernels/kernel_runner.h" +#include "tensorflow/lite/micro/test_helpers.h" #include "tensorflow/lite/micro/testing/micro_test.h" -// 헬퍼들은 익명 namespace (전역) 안에만 둡니다. +namespace tflite { +namespace ops { +namespace micro { + +const TFLMRegistration* Register_ONE_HOT(); +} // namespace micro +} // namespace ops +} // namespace tflite + +namespace tflite { +namespace testing { namespace { -using tflite::micro::KernelRunner; +// OneHot 연산 테스트를 위한 헬퍼 함수 +template +void TestOneHot(const int* indices_dims, const int32_t* indices_data, + const int* depth_dims, const int32_t* depth_data, + const int* on_dims, const T* on_data, const int* off_dims, + const T* off_data, const int* output_dims, + const T* expected_output_data, T* output_data, int axis = -1) { + // 1. 텐서 설정 + TfLiteIntArray* in_dims = IntArrayFromInts(indices_dims); + TfLiteIntArray* d_dims = IntArrayFromInts(depth_dims); + TfLiteIntArray* on_val_dims = IntArrayFromInts(on_dims); + TfLiteIntArray* off_val_dims = IntArrayFromInts(off_dims); + TfLiteIntArray* out_dims = IntArrayFromInts(output_dims); + + const int output_dims_count = ElementCount(*out_dims); + + // 2. 입력 텐서 생성 + constexpr int inputs_size = 4; + constexpr int outputs_size = 1; + constexpr int tensors_size = inputs_size + outputs_size; + TfLiteTensor tensors[tensors_size] = { + CreateTensor(indices_data, in_dims), CreateTensor(depth_data, d_dims), + CreateTensor(on_data, on_val_dims), CreateTensor(off_data, off_val_dims), + CreateTensor(output_data, out_dims), // 출력 텐서 (데이터는 비워둠) + }; + + // 3. 파라미터 설정 + TfLiteOneHotParams builtin_data = {axis}; -// dims 배열 → TfLiteIntArray로 캐스팅 -TfLiteIntArray* IntArrayFromInts(const int* dims) { - return const_cast( - reinterpret_cast(dims)); -} + // 4. KernelRunner 실행 + int inputs_array_data[] = {4, 0, 1, 2, 3}; // indices, depth, on, off + TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data); + int outputs_array_data[] = {1, 4}; // output tensor index + TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); + + // 등록 함수 이름은 구현하신 이름으로 변경 (예: + // tflite::ops::micro::Register_ONE_HOT) + const TFLMRegistration registration = *tflite::ops::micro::Register_ONE_HOT(); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); + + TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); + TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); -TfLiteTensor CreateInt32Tensor(int32_t* data, TfLiteIntArray* dims) { - TfLiteTensor t; - memset(&t, 0, sizeof(TfLiteTensor)); - t.type = kTfLiteInt32; - t.dims = dims; - t.data.i32 = data; - t.allocation_type = kTfLiteMemNone; - return t; + // 5. 결과 검증 + for (int i = 0; i < output_dims_count; ++i) { + TF_LITE_MICRO_EXPECT_EQ(expected_output_data[i], output_data[i]); + } } } // namespace +} // namespace testing +} // namespace tflite -// ★★★ 여기서부터는 절대 namespace 안에 넣지 마세요 ★★★ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(OneHot_BasicInt32) { - int indices_shape_arr[] = {1, 3}; // rank=1, dim=3 - TfLiteIntArray* indices_shape = IntArrayFromInts(indices_shape_arr); - int32_t indices_data[3] = {0, 1, 2}; - - int depth_shape_arr[] = {0}; // scalar - TfLiteIntArray* depth_shape = IntArrayFromInts(depth_shape_arr); - int32_t depth_data[1] = {3}; - - TfLiteIntArray* scalar_shape = depth_shape; - int32_t on_value_data[1] = {1}; - int32_t off_value_data[1] = {0}; - - int output_shape_arr[] = {2, 3, 3}; // [3,3] - TfLiteIntArray* output_shape = IntArrayFromInts(output_shape_arr); - int32_t output_data[9] = {0}; + // Indices: [0, 1, 2] + const int indices_dims[] = {1, 3}; + const int32_t indices_data[] = {0, 1, 2}; - TfLiteTensor tensors[5]; - tensors[0] = CreateInt32Tensor(indices_data, indices_shape); - tensors[1] = CreateInt32Tensor(depth_data, depth_shape); - tensors[2] = CreateInt32Tensor(on_value_data, scalar_shape); - tensors[3] = CreateInt32Tensor(off_value_data, scalar_shape); - tensors[4] = CreateInt32Tensor(output_data, output_shape); + // Depth: 3 + const int depth_dims[] = {1, 1}; + const int32_t depth_data[] = {3}; - int inputs_arr[] = {4, 0, 1, 2, 3}; - int outputs_arr[] = {1, 4}; - TfLiteIntArray* inputs = IntArrayFromInts(inputs_arr); - TfLiteIntArray* outputs = IntArrayFromInts(outputs_arr); + // On: 1, Off: 0 + const int on_dims[] = {1, 1}; + const int32_t on_data[] = {1}; + const int off_dims[] = {1, 1}; + const int32_t off_data[] = {0}; - const TFLMRegistration* registration = tflite::ops::micro::Register_ONE_HOT(); + // Output: [3, 3] -> Identity Matrix + const int output_dims[] = {2, 3, 3}; + const int32_t expected_output[] = {1, 0, 0, 0, 1, 0, 0, 0, 1}; - TfLiteOneHotParams params; - memset(¶ms, 0, sizeof(params)); - params.axis = -1; // 마지막 축 기준 one-hot + int32_t output_data[9]; // 결과 받을 버퍼 - KernelRunner runner(*registration, tensors, 5, inputs, outputs, - /*builtin_data=*/¶ms, - /*error_reporter=*/nullptr); - - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); - int32_t expected[9] = { - 1, 0, 0, 0, 1, 0, 0, 0, 1, - }; - for (int i = 0; i < 9; ++i) { - TF_LITE_MICRO_EXPECT_EQ(expected[i], output_data[i]); - } + tflite::testing::TestOneHot(indices_dims, indices_data, depth_dims, + depth_data, on_dims, on_data, off_dims, off_data, + output_dims, expected_output, output_data); } -TF_LITE_MICRO_TESTS_END +TF_LITE_MICRO_TESTS_END \ No newline at end of file From f9fb4be01438396728d0b43bfc7b63fdaacffe9d Mon Sep 17 00:00:00 2001 From: junseokShim Date: Wed, 3 Dec 2025 19:44:17 +0900 Subject: [PATCH 12/13] translate comments to Eng --- tensorflow/lite/micro/kernels/one_hot.cc | 37 +++++++++++------------- tensorflow/lite/micro/kernels/one_hot.h | 2 +- tensorflow/lite/micro/one_hot_test.cc | 17 +++++------ 3 files changed, 26 insertions(+), 30 deletions(-) diff --git a/tensorflow/lite/micro/kernels/one_hot.cc b/tensorflow/lite/micro/kernels/one_hot.cc index bf0f69f77f6..472642e76a2 100644 --- a/tensorflow/lite/micro/kernels/one_hot.cc +++ b/tensorflow/lite/micro/kernels/one_hot.cc @@ -30,10 +30,9 @@ constexpr int kOnValueTensor = 2; constexpr int kOffValueTensor = 3; constexpr int kOutputTensor = 0; -namespace { // 로컬 유틸 함수들 +namespace { // Local Util functions inline int NumElements(const TfLiteEvalTensor* t) { int count = 1; - // TfLiteEvalTensor의 dims는 TfLiteIntArray* 타입입니다. for (int i = 0; i < t->dims->size; ++i) { count *= t->dims->data[i]; } @@ -41,10 +40,12 @@ inline int NumElements(const TfLiteEvalTensor* t) { } } // namespace -// TfLiteNode에서 입력 (indices, depth, on_value, off_value) 및 출력 텐서 -// (output) 를 가져옴 params->axis 를 읽어 실제로 Depth 차원이 들어갈 위치 -// (Axis) 계산 Prepare과 Eval 함수 내에서 잠시 생성되었다가 사라짐 → Stack -// memory 사용 효율적 +// Retrieves the input tensors (indices, depth, on_value, off_value) and the +// output tensor (output) from the TfLiteNode. +// Reads params->axis to compute the actual position (axis) where the depth +// dimension will be inserted. +// These values are created temporarily within the Prepare and Eval functions +// and are destroyed afterward → efficient use of stack memory. struct OneHotContext { OneHotContext(TfLiteContext* context, TfLiteNode* node) { indices = tflite::micro::GetEvalInput(context, node, kIndicesTensor); @@ -62,7 +63,7 @@ struct OneHotContext { } const TfLiteEvalTensor* indices; - const TfLiteEvalTensor* depth; // 새로 생기는 One-hot 차원 크기 + const TfLiteEvalTensor* depth; const TfLiteEvalTensor* on_value; const TfLiteEvalTensor* off_value; TfLiteEvalTensor* output; @@ -72,8 +73,7 @@ struct OneHotContext { TfLiteType dtype; }; -// 실제 연산 수행 함수 -// +// Operation function template void OneHotComputeImpl(const OneHotContext& op_context) { int prefix_dim_size = 1; @@ -121,20 +121,17 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const OneHotContext& op_context) { TF_LITE_ENSURE(context, *op_context.depth->data.i32 >= 0); - // depth 데이터 읽기 + // read depth data const int depth_val = *tflite::micro::GetTensorData(op_context.depth); TF_LITE_ENSURE(context, depth_val >= 0); - // Output Tensor 검증 + // Output Tensor evaluation TF_LITE_ENSURE(context, op_context.output != nullptr); TF_LITE_ENSURE(context, op_context.output->dims != nullptr); - // todo - // TFLM에서는 Output Tensor의 dims가 이미 할당되어 있다고 가정합니다. - // 하지만 모델이 생성될 때 계산된 dims와 현재 depth값으로 계산한 dims가 - // 일치하는지 확인은 필요합니다. + // TFLM assumes that the output tensor’s dims are already allocated const int expected_dims_size = op_context.output_dims; TF_LITE_ENSURE_EQ(context, op_context.output->dims->size, expected_dims_size); @@ -148,8 +145,8 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, expected_dim_i = op_context.indices->dims->data[i - 1]; } - // TFLM 컴파일러(Offline Memory Planner)가 할당해둔 크기와 실제 계산 크기가 - // 다르면 에러 + // If the size pre-allocated by the TFLM compiler (Offline Memory Planner) + // does not match the actual computed size, an error is raised. TF_LITE_ENSURE_EQ(context, op_context.output->dims->data[i], expected_dim_i); } @@ -191,8 +188,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_TYPES_EQ(context, op_context.off_value->type, op_context.dtype); - // depth 텐서가 상수가 아니더라도, 테스트에서는 output shape를 - // 미리 지정해 두었으므로 여기서는 그냥 검증만 수행 + // Even if the depth tensor is not a constant, the test predefines the output + // shape, so here we only perform validation. return ResizeOutputTensor(context, op_context); } @@ -227,7 +224,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace one_hot -// 헤더에 선언된 Register_ONE_HOT 구현 +// Implementation of Register_ONE_HOT declared in the header const TFLMRegistration* Register_ONE_HOT() { static TFLMRegistration r = {}; diff --git a/tensorflow/lite/micro/kernels/one_hot.h b/tensorflow/lite/micro/kernels/one_hot.h index 4593e12a178..789f8f7657c 100644 --- a/tensorflow/lite/micro/kernels/one_hot.h +++ b/tensorflow/lite/micro/kernels/one_hot.h @@ -8,7 +8,7 @@ namespace tflite { namespace ops { namespace micro { -// ONE_HOT 커널 등록 함수 (all_ops_resolver 등에서 사용) +// ONE_HOT Kernel regist function (use at all_ops_resolver) const TFLMRegistration* Register_ONE_HOT(); } // namespace micro diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc index dac03cdd569..a2b052557ee 100644 --- a/tensorflow/lite/micro/one_hot_test.cc +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -17,14 +17,14 @@ namespace tflite { namespace testing { namespace { -// OneHot 연산 테스트를 위한 헬퍼 함수 +// Helper function for OneHot operation test template void TestOneHot(const int* indices_dims, const int32_t* indices_data, const int* depth_dims, const int32_t* depth_data, const int* on_dims, const T* on_data, const int* off_dims, const T* off_data, const int* output_dims, const T* expected_output_data, T* output_data, int axis = -1) { - // 1. 텐서 설정 + // 1. Tensor Setting TfLiteIntArray* in_dims = IntArrayFromInts(indices_dims); TfLiteIntArray* d_dims = IntArrayFromInts(depth_dims); TfLiteIntArray* on_val_dims = IntArrayFromInts(on_dims); @@ -33,26 +33,25 @@ void TestOneHot(const int* indices_dims, const int32_t* indices_data, const int output_dims_count = ElementCount(*out_dims); - // 2. 입력 텐서 생성 + // 2. Create Input Tensor constexpr int inputs_size = 4; constexpr int outputs_size = 1; constexpr int tensors_size = inputs_size + outputs_size; TfLiteTensor tensors[tensors_size] = { CreateTensor(indices_data, in_dims), CreateTensor(depth_data, d_dims), CreateTensor(on_data, on_val_dims), CreateTensor(off_data, off_val_dims), - CreateTensor(output_data, out_dims), // 출력 텐서 (데이터는 비워둠) + CreateTensor(output_data, out_dims), // Output Tensor }; - // 3. 파라미터 설정 + // 3. Parameter setting TfLiteOneHotParams builtin_data = {axis}; - // 4. KernelRunner 실행 + // 4. KernelRunner execution int inputs_array_data[] = {4, 0, 1, 2, 3}; // indices, depth, on, off TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data); int outputs_array_data[] = {1, 4}; // output tensor index TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); - // 등록 함수 이름은 구현하신 이름으로 변경 (예: // tflite::ops::micro::Register_ONE_HOT) const TFLMRegistration registration = *tflite::ops::micro::Register_ONE_HOT(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, @@ -62,7 +61,7 @@ void TestOneHot(const int* indices_dims, const int32_t* indices_data, TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); - // 5. 결과 검증 + // 5. Result evaluation for (int i = 0; i < output_dims_count; ++i) { TF_LITE_MICRO_EXPECT_EQ(expected_output_data[i], output_data[i]); } @@ -93,7 +92,7 @@ TF_LITE_MICRO_TEST(OneHot_BasicInt32) { const int output_dims[] = {2, 3, 3}; const int32_t expected_output[] = {1, 0, 0, 0, 1, 0, 0, 0, 1}; - int32_t output_data[9]; // 결과 받을 버퍼 + int32_t output_data[9]; tflite::testing::TestOneHot(indices_dims, indices_data, depth_dims, depth_data, on_dims, on_data, off_dims, off_data, From 8ecac956fac3c2e925007d9edfa184eb02329931 Mon Sep 17 00:00:00 2001 From: junseokShim Date: Wed, 3 Dec 2025 20:41:03 +0900 Subject: [PATCH 13/13] TFLM: Add ONE_HOT operator implementation and tests --- tensorflow/lite/micro/one_hot_test.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/micro/one_hot_test.cc b/tensorflow/lite/micro/one_hot_test.cc index a2b052557ee..a4b58d642dd 100644 --- a/tensorflow/lite/micro/one_hot_test.cc +++ b/tensorflow/lite/micro/one_hot_test.cc @@ -71,6 +71,7 @@ void TestOneHot(const int* indices_dims, const int32_t* indices_data, } // namespace testing } // namespace tflite +// UNIT TEST TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(OneHot_BasicInt32) {