
Commit c414911

Fix
Signed-off-by: Janusz Lisiecki <[email protected]>
1 parent: 7d47417

File tree

3 files changed: +135 −16 lines

  dali/python/nvidia/dali/plugin/pytorch/loader_evaluator/__init__.py
  docs/examples/frameworks/pytorch/loader_evaluator/pytorch_data_loader_evaluator.ipynb
  qa/TL0_python-self-test-core/test_body.sh


dali/python/nvidia/dali/plugin/pytorch/loader_evaluator/__init__.py

Lines changed: 1 addition & 2 deletions
@@ -13,6 +13,5 @@
 # limitations under the License.
 
 from .loader import LoaderEvaluator
-from .metrics import PerformanceMetrics
 
-__all__ = ["LoaderEvaluator", "PerformanceMetrics"]
+__all__ = ["LoaderEvaluator"]

docs/examples/frameworks/pytorch/loader_evaluator/pytorch_data_loader_evaluator.ipynb

Lines changed: 133 additions & 13 deletions
@@ -26,9 +26,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Using device: cuda\n"
+     ]
+    }
+   ],
    "source": [
     "import os\n",
     "import time\n",
@@ -60,9 +68,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 2,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Dataset size: 1000\n"
+     ]
+    }
+   ],
    "source": [
     "# Our model\n",
     "class UltraLightModel(nn.Module):\n",
@@ -136,7 +152,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -176,9 +192,53 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Baseline Training (Real Data Loading)\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Epoch 0: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 63/63 [00:03<00:00, 17.77it/s, Time=3.5s]\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 0 - Time: 3.55s\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Epoch 1: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 63/63 [00:03<00:00, 17.90it/s, Time=3.5s]"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 1 - Time: 3.52s\n",
+      "Baseline average epoch time: 3.53s\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n"
+     ]
+    }
+   ],
    "source": [
     "# Your existing training setup\n",
     "model = UltraLightModel(num_classes=1000).to(device)\n",
@@ -209,7 +269,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -228,9 +288,53 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 6,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "No-Overhead Training (Cached Data Loading)\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Epoch 0: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 63/63 [00:00<00:00, 65.49it/s, Time=1.0s]\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 0 - Time: 0.96s\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Epoch 1: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 63/63 [00:00<00:00, 64.57it/s, Time=1.0s]"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 1 - Time: 0.98s\n",
+      "No-Overhead average epoch time: 0.97s\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n"
+     ]
+    }
+   ],
    "source": [
     "# Train with the same setup, just different dataloader\n",
     "print(\"No-Overhead Training (Cached Data Loading)\")\n",
@@ -256,9 +360,25 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 7,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "Performance Comparison:\n",
+      "Baseline: 3.53s per epoch\n",
+      "No-Overhead: 0.97s per epoch\n",
+      "Speedup: 3.64x\n",
+      "Time saved: 2.56s per epoch (72.6%)\n",
+      "\n",
+      "*** DATA LOADING BOTTLENECK DETECTED ***\n",
+      "You could speed up training by 72.6% by optimizing data loading.\n"
+     ]
+    }
+   ],
    "source": [
     "# Compare performance\n",
     "speedup = baseline_avg_time / sol_avg_time\n",
@@ -322,7 +442,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.12.3"
+   "version": "3.10.19"
   }
  },
 "nbformat": 4,

qa/TL0_python-self-test-core/test_body.sh

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,6 @@ test_py_with_framework() {
     done
 
     ${python_new_invoke_test} -A '!slow,!pytorch,!mxnet,!cupy' test_backend_impl
-    ${python_new_invoke_test} -A '!slow,!pytorch,!mxnet,!cupy' test_pytorch_loader_evaluator
 
     if [ -z "$DALI_ENABLE_SANITIZERS" ]; then
         ${python_new_invoke_test} -A 'numba' -s type_annotations
@@ -39,6 +38,7 @@ test_py() {
    python test_coco_tfrecord.py -i 64
    python test_data_containers.py -s -b 20
    python test_data_containers.py -s -b 20 -n
+   ${python_new_invoke_test} test_pytorch_loader_evaluator
 }
 
 test_autograph() {
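Net effect of this file: the loader-evaluator test invocation moves out of test_py_with_framework, where it ran under the '!slow,!pytorch,!mxnet,!cupy' attribute filter, and into test_py without any filter, presumably so its PyTorch-dependent cases are no longer skipped.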
