models/intel_optimized_models
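# AlexNet train/val topology driven by DummyData layers: each phase feeds a
# constant-filled 256x3x224x224 image batch and a 256x1x1x1 label blob, so the
# net can be run end to end without preparing an LMDB or image dataset.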
name: "AlexNet"
layer {
  name: "data"
  type: "DummyData"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  dummy_data_param {
    data_filler {
      type: "constant"
      value: 0.01
    }
    shape: { dim: 256 dim: 3 dim: 224 dim: 224 }
    shape: { dim: 256 dim: 1 dim: 1 dim: 1 }
  }
}
layer {
  name: "data"
  type: "DummyData"
  top: "data"
  top: "label"
  include {
    phase: TEST
  }
  dummy_data_param {
    data_filler {
      type: "constant"
      value: 0.01
    }
    shape: { dim: 256 dim: 3 dim: 224 dim: 224 }
    shape: { dim: 256 dim: 1 dim: 1 dim: 1 }
  }
}

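# conv1: 96 filters of 11x11 at stride 4, followed by ReLU, LRN, and 3x3 max
# pooling with stride 2.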
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "conv1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "norm1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
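# conv2: 256 filters of 5x5 with pad 2, split into 2 groups (the two-GPU split
# of the original AlexNet), again followed by ReLU, LRN, and max pooling.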
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "conv2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "norm2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
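# conv3: 384 filters of 3x3 with pad 1, ungrouped.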
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
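# conv4: 384 filters of 3x3 with pad 1, 2 groups.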
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
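# conv5: 256 filters of 3x3 with pad 1, 2 groups, followed by ReLU and the
# final 3x3 max pooling with stride 2.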
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
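# fc6: first 4096-unit fully connected layer, with ReLU and 50% dropout.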
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
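# fc7: second 4096-unit fully connected layer, with ReLU and 50% dropout.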
layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc7"
  top: "fc7"
}
layer {
  name: "drop7"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
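# fc8: 1000-way classifier over the ImageNet classes.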
layer {
  name: "fc8"
  type: "InnerProduct"
  bottom: "fc7"
  top: "fc8"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 1000
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
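# Evaluation heads: softmax loss in both phases, plus top-1 and top-5 accuracy
# in the TEST phase only.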
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc8"
  bottom: "label"
  top: "loss"
}
layer {
  name: "loss3/top-1"
  type: "Accuracy"
  bottom: "fc8"
  bottom: "label"
  top: "loss3/top-1"
  include {
    phase: TEST
  }
}
layer {
  name: "loss3/top-5"
  type: "Accuracy"
  bottom: "fc8"
  bottom: "label"
  top: "loss3/top-5"
  include {
    phase: TEST
  }
  accuracy_param {
    top_k: 5
  }
}
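Because the inputs are synthetic DummyData, this topology can be exercised with nothing but a Caffe build. Below is a minimal pycaffe sketch, assuming pycaffe is on PYTHONPATH and the file above is saved locally as alexnet_dummydata.prototxt (the filename is hypothetical); CPU mode is used since this model lives under intel_optimized_models.

import caffe

caffe.set_mode_cpu()  # CPU mode; these Intel-optimized topologies target CPU
net = caffe.Net('alexnet_dummydata.prototxt', caffe.TRAIN)  # hypothetical local path

# One forward/backward pass over the constant-filled 256-image batch.
net.forward()
net.backward()
print('loss =', float(net.blobs['loss'].data))

The stock command-line tool gives per-layer forward/backward timings for the same file: caffe time -model alexnet_dummydata.prototxt -iterations 50.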