nn.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file nn.h
 * \brief Auxiliary param for tensor primitive.
 */
#ifndef NNVM_TOP_NN_H_
#define NNVM_TOP_NN_H_

#include <dmlc/base.h>
#include <dmlc/parameter.h>
#include <nnvm/tuple.h>
#include <nnvm/layout.h>
#include <string>
#include "tensor.h"

namespace nnvm {
namespace top {

struct DenseParam : public dmlc::Parameter<DenseParam> {
  int units;
  bool use_bias;

  DMLC_DECLARE_PARAMETER(DenseParam) {
    DMLC_DECLARE_FIELD(units).set_lower_bound(1)
        .describe("Number of hidden units of the dense transformation.");
    DMLC_DECLARE_FIELD(use_bias).set_default(true)
        .describe("Whether to use a bias parameter.");
  }
  // constants
  static const constexpr int kData = 0;
  static const constexpr int kWeight = 1;
  static const constexpr int kBias = 2;
};
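
The DMLC_DECLARE_PARAMETER / DMLC_DECLARE_FIELD pattern used in DenseParam is shared by every struct in this file. The snippet below is a minimal standalone sketch (not part of nn.h) of how such a parameter struct is usually registered once in a .cc file and then filled from string key/value attributes; the include path <nnvm/top/nn.h> and the main() harness are assumptions for illustration.

#include <map>
#include <string>
#include <nnvm/top/nn.h>   // assumed include path for this header

namespace nnvm {
namespace top {
// Registration normally lives in exactly one translation unit.
DMLC_REGISTER_PARAMETER(DenseParam);
}  // namespace top
}  // namespace nnvm

int main() {
  nnvm::top::DenseParam param;
  std::map<std::string, std::string> kwargs = {{"units", "128"}};
  param.Init(kwargs);   // use_bias falls back to its declared default (true)
  // A value violating a declared constraint, e.g. {"units", "0"} against
  // set_lower_bound(1), makes Init() throw a dmlc::ParamError.
  return (param.units == 128 && param.use_bias) ? 0 : 1;
}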

struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float rate;

  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(rate).set_default(0.5)
        .set_range(0, 1)
        .describe("Fraction of the input that gets dropped out during training time.");
  }
};

struct BatchNormParam : public dmlc::Parameter<BatchNormParam> {
  int axis;
  double epsilon;
  double momentum;
  bool center;
  bool scale;

  DMLC_DECLARE_PARAMETER(BatchNormParam) {
    DMLC_DECLARE_FIELD(axis).set_default(1)
        .describe("Specify the shape axis that holds the channel dimension.");
    DMLC_DECLARE_FIELD(epsilon).set_default(1e-5)
        .describe("Small float added to variance to avoid dividing by zero.");
    DMLC_DECLARE_FIELD(center).set_default(true)
        .describe("If True, add offset of `beta` to normalized tensor. "
                  "If False, `beta` is ignored.");
    DMLC_DECLARE_FIELD(scale).set_default(true)
        .describe("If True, multiply by `gamma`. If False, `gamma` is not used. "
                  "When the next layer is piecewise linear (e.g. `nn.relu`), "
                  "this can be disabled since the scaling "
                  "will be done by the next layer.");
  }
  // constants
  static const constexpr int kData = 0;
  static const constexpr int kGamma = 1;
  static const constexpr int kBeta = 2;
  static const constexpr int kMovingMean = 3;
  static const constexpr int kMovingVariance = 4;
};
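
The kData ... kMovingVariance constants are conventionally used as input-slot indices when an operator looks up its arguments. A minimal sketch, assuming the header is reachable as <nnvm/top/nn.h>; the function name and signature are illustrative, not NNVM's actual shape-inference code.

#include <vector>
#include <nnvm/top/nn.h>   // assumed include path

// Pick inputs by slot constant instead of a magic number; gamma is a 1-D
// vector over the channel axis in standard batch normalization.
inline bool BatchNormHasMatchingGamma(const nnvm::top::BatchNormParam& param,
                                      const std::vector<nnvm::TShape>& in_shapes) {
  using nnvm::top::BatchNormParam;
  const nnvm::TShape& data  = in_shapes[BatchNormParam::kData];
  const nnvm::TShape& gamma = in_shapes[BatchNormParam::kGamma];
  const int ndim = static_cast<int>(data.ndim());
  const int axis = param.axis < 0 ? param.axis + ndim : param.axis;
  return gamma.ndim() == 1 && gamma[0] == data[axis];
}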


// Shared by softmax and log_softmax
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
  int axis;

  DMLC_DECLARE_PARAMETER(SoftmaxParam) {
    DMLC_DECLARE_FIELD(axis).set_default(-1)
        .describe("The axis to sum over when computing softmax.");
  }
};

struct LeakyReLUParam : public dmlc::Parameter<LeakyReLUParam> {
  double alpha;

  DMLC_DECLARE_PARAMETER(LeakyReLUParam) {
    DMLC_DECLARE_FIELD(alpha).set_lower_bound(0.0).set_default(0.25)
        .describe("Slope coefficient for the negative half axis.");
  }
};

struct PReLUParam : public dmlc::Parameter<PReLUParam> {
  int axis;
  DMLC_DECLARE_PARAMETER(PReLUParam) {
    DMLC_DECLARE_FIELD(axis).set_default(1)
        .describe("Specify the shape axis that holds the channel dimension.");
  }
};

struct PadParam : public dmlc::Parameter<PadParam> {
  float pad_value;
  Tuple<Tuple<int> > pad_width;

  DMLC_DECLARE_PARAMETER(PadParam) {
    DMLC_DECLARE_FIELD(pad_value).set_default(0.0)
        .describe("The value to be padded.");
    DMLC_DECLARE_FIELD(pad_width)
        .describe("Number of values padded to the edges of each axis, "
                  "in the format of ((before_1, after_1), ... (before_N, after_N)).");
  }
};
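
As a concrete reading of the pad_width format above: it carries one (before, after) pair per input axis, so 1-pixel spatial padding of a 4-D NCHW input that leaves the batch and channel axes untouched would be

  pad_width = ((0, 0), (0, 0), (1, 1), (1, 1))

i.e. (before_N, after_N), (before_C, after_C), (before_H, after_H), (before_W, after_W).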


struct Conv2DParam : public dmlc::Parameter<Conv2DParam> {
  int channels;
  TShape kernel_size;
  TShape strides;
  TShape padding;
  TShape dilation;
  int groups;
  std::string layout;
  std::string kernel_layout;
  std::string out_layout;
  int out_dtype;
  bool use_bias;

  DMLC_DECLARE_PARAMETER(Conv2DParam) {
    DMLC_DECLARE_FIELD(channels)
        .describe("The dimensionality of the output space, "
                  "i.e. the number of output channels in the convolution.");
    DMLC_DECLARE_FIELD(kernel_size)
        .describe("Specifies the dimensions of the convolution window.");
    DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1}))
        .describe("Specifies the strides of the convolution.");
    DMLC_DECLARE_FIELD(padding).set_default(TShape({0, 0}))
        .describe("If padding is non-zero, then the input is implicitly zero-padded "
                  "on both sides for padding number of points.");
    DMLC_DECLARE_FIELD(dilation).set_default(TShape({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    DMLC_DECLARE_FIELD(groups).set_default(1)
        .describe("Controls the connections between inputs and outputs. "
                  "At groups=1, all inputs are convolved to all outputs. "
                  "At groups=2, the operation becomes equivalent to having two convolution "
                  "layers side by side, each seeing half the input channels, and producing "
                  "half the output channels, and both subsequently concatenated.");
    DMLC_DECLARE_FIELD(layout).set_default("NCHW")
        .describe("Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
                  "dimensions respectively. Convolution is applied on the 'H' and "
                  "'W' dimensions.");
    DMLC_DECLARE_FIELD(out_layout).set_default("__undef__")
        .describe("Dimension ordering of output. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
                  "dimensions respectively. Defaults to the same layout as the input.");
    DMLC_DECLARE_FIELD(kernel_layout).set_default("OIHW")
        .describe("Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc. "
                  "'O', 'I', 'H', 'W' stand for num_filter, input_channel, height, and width "
                  "dimensions respectively.");
    DMLC_DECLARE_DTYPE_FIELD(out_dtype)
        .add_enum("same", -1)
        .set_default(-1)
        .describe("Output data type; set to an explicit type under a mixed-precision setting.");

    DMLC_DECLARE_FIELD(use_bias).set_default(true)
        .describe("Whether the layer uses a bias vector.");
  }
  // constants
  static const constexpr int kData = 0;
  static const constexpr int kWeight = 1;
  static const constexpr int kBias = 2;
};
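
For reference, with the default NCHW/OIHW layouts, the spatial output extent implied by kernel_size, strides, padding, and dilation follows the usual convolution arithmetic. The helper below is an illustrative sketch of that formula, not code from this library.

#include <cstdint>

// Effective kernel extent grows with dilation; the window count is then a
// strided sweep over the zero-padded input.
inline int64_t ConvOutDim(int64_t in_dim, int64_t kernel, int64_t stride,
                          int64_t pad, int64_t dilation) {
  const int64_t effective_kernel = dilation * (kernel - 1) + 1;
  return (in_dim + 2 * pad - effective_kernel) / stride + 1;
}
// Example: ConvOutDim(224, 3, 2, 1, 1) == 112 for a 224-pixel input,
// a 3x3 kernel, stride 2, padding 1, and no dilation.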

struct WinogradWeightTransformParam : public dmlc::Parameter<WinogradWeightTransformParam> {
  int tile_size;

  DMLC_DECLARE_PARAMETER(WinogradWeightTransformParam) {
    DMLC_DECLARE_FIELD(tile_size)
        .describe("Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)");
  }

  static const constexpr int kWeight = 0;
};

struct WinogradNNPACKWeightTransformParam
    : public dmlc::Parameter<WinogradNNPACKWeightTransformParam> {
  int convolution_algorithm;
  int out_dtype;

  DMLC_DECLARE_PARAMETER(WinogradNNPACKWeightTransformParam) {
    DMLC_DECLARE_FIELD(convolution_algorithm)
        .describe(
            "The convolution algorithm for Winograd NNPACK. "
            "E.g. tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8 for WT_8x8, "
            "tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16 for WT_8x8_FP16");
    DMLC_DECLARE_DTYPE_FIELD(out_dtype)
        .add_enum("same", -1)
        .set_default(-1)
        .describe("Output data type; set to an explicit type under a mixed-precision setting.");
  }

  static const constexpr int kWeight = 0;
};

struct WinogradConv2DParam : public dmlc::Parameter<WinogradConv2DParam> {
  int channels;
  TShape kernel_size;
  TShape strides;
  TShape padding;
  TShape dilation;
  int groups;
  std::string layout;
  std::string kernel_layout;
  std::string out_layout;
  int out_dtype;
  bool use_bias;
  int tile_size;

  DMLC_DECLARE_PARAMETER(WinogradConv2DParam) {
    DMLC_DECLARE_FIELD(channels)
        .describe("The dimensionality of the output space, "
                  "i.e. the number of output channels in the convolution.");
    DMLC_DECLARE_FIELD(kernel_size)
        .describe("Specifies the dimensions of the convolution window.");
    DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1}))
        .describe("Specifies the strides of the convolution.");
    DMLC_DECLARE_FIELD(padding).set_default(TShape({0, 0}))
        .describe("If padding is non-zero, then the input is implicitly zero-padded "
                  "on both sides for padding number of points.");
    DMLC_DECLARE_FIELD(dilation).set_default(TShape({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    DMLC_DECLARE_FIELD(groups).set_default(1)
        .describe("Controls the connections between inputs and outputs. "
                  "At groups=1, all inputs are convolved to all outputs. "
                  "At groups=2, the operation becomes equivalent to having two convolution "
                  "layers side by side, each seeing half the input channels, and producing "
                  "half the output channels, and both subsequently concatenated.");
    DMLC_DECLARE_FIELD(layout).set_default("NCHW")
        .describe("Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
                  "dimensions respectively. Convolution is applied on the 'H' and "
                  "'W' dimensions.");
    DMLC_DECLARE_FIELD(out_layout).set_default("__undef__")
        .describe("Dimension ordering of output. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
                  "dimensions respectively. Defaults to the same layout as the input.");
    DMLC_DECLARE_FIELD(kernel_layout).set_default("OIHW")
        .describe("Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc. "
                  "'O', 'I', 'H', 'W' stand for num_filter, input_channel, height, and width "
                  "dimensions respectively.");
    DMLC_DECLARE_DTYPE_FIELD(out_dtype)
        .add_enum("same", -1)
        .set_default(-1)
        .describe("Output data type; set to an explicit type under a mixed-precision setting.");
    DMLC_DECLARE_FIELD(use_bias).set_default(true)
        .describe("Whether the layer uses a bias vector.");
    DMLC_DECLARE_FIELD(tile_size)
        .describe("Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)");
  }
  // constants
  static const constexpr int kData = 0;
  static const constexpr int kWeight = 1;
  static const constexpr int kBias = 2;
};

struct Conv2DTransposeParam : public dmlc::Parameter<Conv2DTransposeParam> {
  int channels;
  TShape kernel_size;
  TShape strides;
  TShape padding;
  TShape output_padding;
  TShape dilation;
  int groups;
  std::string layout;
  std::string kernel_layout;
  int out_dtype;
  bool use_bias;

  DMLC_DECLARE_PARAMETER(Conv2DTransposeParam) {
    DMLC_DECLARE_FIELD(channels)
        .describe("The dimensionality of the output space, "
                  "i.e. the number of output channels in the convolution.");
    DMLC_DECLARE_FIELD(kernel_size)
        .describe("Specifies the dimensions of the convolution window.");
    DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1}))
        .describe("Specifies the strides of the convolution.");
    DMLC_DECLARE_FIELD(output_padding).set_default(TShape({0, 0}))
        .describe("Zero-padding added to one side of the output.");
    DMLC_DECLARE_FIELD(padding).set_default(TShape({0, 0}))
        .describe("If padding is non-zero, then the input is implicitly zero-padded "
                  "on both sides for padding number of points.");
    DMLC_DECLARE_FIELD(dilation).set_default(TShape({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    DMLC_DECLARE_FIELD(groups).set_default(1)
        .describe("Controls the connections between inputs and outputs. "
                  "At groups=1, all inputs are convolved to all outputs. "
                  "At groups=2, the operation becomes equivalent to having two convolution "
                  "layers side by side, each seeing half the input channels, and producing "
                  "half the output channels, and both subsequently concatenated.");
    DMLC_DECLARE_FIELD(layout).set_default("NCHW")
        .describe("Dimension ordering of data. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
                  "dimensions respectively. Convolution is applied on the 'H' and "
                  "'W' dimensions.");
    DMLC_DECLARE_FIELD(kernel_layout).set_default("OIHW")
        .describe("Dimension ordering of data and weight. Can be 'OIHW', 'OIHW16o16i', etc. "
                  "'O', 'I', 'H', 'W' stand for num_filter, input_channel, height, and width "
                  "dimensions respectively.");
    DMLC_DECLARE_DTYPE_FIELD(out_dtype)
        .add_enum("same", -1)
        .set_default(-1)
        .describe("Output data type; set to an explicit type under a mixed-precision setting.");
    DMLC_DECLARE_FIELD(use_bias).set_default(true)
        .describe("Whether the layer uses a bias vector.");
  }
  // constants
  static const constexpr int kData = 0;
  static const constexpr int kWeight = 1;
  static const constexpr int kBias = 2;
};


struct MaxPool2DParam : public dmlc::Parameter<MaxPool2DParam> {
  TShape pool_size;
  TShape strides;
  TShape padding;
  std::string layout;
  bool ceil_mode;

  DMLC_DECLARE_PARAMETER(MaxPool2DParam) {
    DMLC_DECLARE_FIELD(pool_size)
        .describe("Size of the pooling windows.");
    DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1}))
        .describe("Specifies the strides of the pooling.");
    DMLC_DECLARE_FIELD(padding).set_default(TShape({0, 0}))
        .describe("If padding is non-zero, then the input is implicitly zero-padded. "
                  "Padding supports both symmetric and asymmetric forms: "
                  "one int : the same padding is used on all sides; "
                  "two ints : bottom and right use the same padding as top and left; "
                  "four ints : padding widths in the order of (top, left, bottom, right).");
    DMLC_DECLARE_FIELD(layout).set_default("NCHW")
        .describe("Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
                  "dimensions respectively. Pooling is applied on the 'H' and "
                  "'W' dimensions.");
    DMLC_DECLARE_FIELD(ceil_mode).set_default(false)
        .describe("When true, will use ceil instead of floor to compute the output shape.");
  }
};


struct AvgPool2DParam : public dmlc::Parameter<AvgPool2DParam> {
  TShape pool_size;
  TShape strides;
  TShape padding;
  std::string layout;
  bool ceil_mode;
  bool count_include_pad;

  DMLC_DECLARE_PARAMETER(AvgPool2DParam) {
    DMLC_DECLARE_FIELD(pool_size)
        .describe("Size of the pooling windows.");
    DMLC_DECLARE_FIELD(strides).set_default(TShape({1, 1}))
        .describe("Specifies the strides of the pooling.");
    DMLC_DECLARE_FIELD(padding).set_default(TShape({0, 0}))
        .describe("If padding is non-zero, then the input is implicitly zero-padded. "
                  "Padding supports both symmetric and asymmetric forms: "
                  "one int : the same padding is used on all sides; "
                  "two ints : bottom and right use the same padding as top and left; "
                  "four ints : padding widths in the order of (top, left, bottom, right).");
    DMLC_DECLARE_FIELD(layout).set_default("NCHW")
        .describe("Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
                  "dimensions respectively. Pooling is applied on the 'H' and "
                  "'W' dimensions.");
    DMLC_DECLARE_FIELD(ceil_mode).set_default(false)
        .describe("When true, will use ceil instead of floor to compute the output shape.");
    DMLC_DECLARE_FIELD(count_include_pad).set_default(false)
        .describe("When true, will include padding to compute the average.");
  }
};
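
The ceil_mode and count_include_pad flags interact with the same output-size arithmetic. The helper below is an illustrative sketch under the usual pooling convention, not code from this library.

#include <cstdint>

// ceil_mode only changes how the window count is rounded.
inline int64_t PoolOutDim(int64_t in_dim, int64_t pool, int64_t stride,
                          int64_t pad, bool ceil_mode) {
  const int64_t span = in_dim + 2 * pad - pool;
  return (ceil_mode ? (span + stride - 1) / stride : span / stride) + 1;
}
// Example: with an 8-pixel input, pool 3, stride 2, no padding,
// PoolOutDim(8, 3, 2, 0, false) == 3 but PoolOutDim(8, 3, 2, 0, true) == 4.
// count_include_pad only affects the divisor of the average: when set, every
// window divides by pool_h * pool_w; otherwise only the non-padding elements
// actually covered by the window are counted.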


struct GlobalPool2DParam : public dmlc::Parameter<GlobalPool2DParam> {
  std::string layout;

  DMLC_DECLARE_PARAMETER(GlobalPool2DParam) {
    DMLC_DECLARE_FIELD(layout).set_default("NCHW")
        .describe("Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
                  "dimensions respectively. Pooling is applied on the 'H' and "
                  "'W' dimensions.");
  }
};

struct UpSamplingParam : public dmlc::Parameter<UpSamplingParam> {
  int scale;
  std::string layout;
  std::string method;

  DMLC_DECLARE_PARAMETER(UpSamplingParam) {
    DMLC_DECLARE_FIELD(scale)
        .describe("Upsampling scaling factor.");
    DMLC_DECLARE_FIELD(layout)
        .set_default("NCHW")
        .describe("Dimension ordering of data. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
                  "dimensions respectively. Upsampling is applied on the 'H' and "
                  "'W' dimensions.");
    DMLC_DECLARE_FIELD(method)
        .set_default("NEAREST_NEIGHBOR")
        .describe("Specify the mode to use for scaling. "
                  "NEAREST_NEIGHBOR - Nearest Neighbor. "
                  "BILINEAR - Bilinear Interpolation.");
  }
};

struct LayoutTransformParam : public dmlc::Parameter<LayoutTransformParam> {
  std::string src_layout;
  std::string dst_layout;

  DMLC_DECLARE_PARAMETER(LayoutTransformParam) {
    DMLC_DECLARE_FIELD(src_layout).set_default("__undef__")
        .describe("Dimension ordering of the source data.");
    DMLC_DECLARE_FIELD(dst_layout).set_default("__undef__")
        .describe("Dimension ordering of the destination data.");
  }
};

struct MultiBoxPriorParam : public dmlc::Parameter<MultiBoxPriorParam> {
  Tuple<float> sizes;
  Tuple<float> ratios;
  Tuple<float> steps;
  Tuple<float> offsets;
  bool clip;

  DMLC_DECLARE_PARAMETER(MultiBoxPriorParam) {
    DMLC_DECLARE_FIELD(sizes).set_default(Tuple<float>({1.0}))
        .describe("List of sizes of generated MultiBox priors.");
    DMLC_DECLARE_FIELD(ratios).set_default(Tuple<float>({1.0}))
        .describe("List of aspect ratios of generated MultiBox priors.");
    DMLC_DECLARE_FIELD(steps).set_default(Tuple<float>({-1.0, -1.0}))
        .describe("Priorbox step across y and x, -1 for auto calculation.");
    DMLC_DECLARE_FIELD(offsets).set_default(Tuple<float>({0.5, 0.5}))
        .describe("Priorbox center offsets, y and x respectively.");
    DMLC_DECLARE_FIELD(clip).set_default(false)
        .describe("Whether to clip out-of-boundary boxes.");
  }
};

struct MultiBoxTransformLocParam : public dmlc::Parameter<MultiBoxTransformLocParam> {
  bool clip;
  float threshold;
  Tuple<float> variances;
  DMLC_DECLARE_PARAMETER(MultiBoxTransformLocParam) {
    DMLC_DECLARE_FIELD(clip).set_default(true)
        .describe("Clip out-of-boundary boxes.");
    DMLC_DECLARE_FIELD(threshold).set_default(0.01)
        .describe("Threshold to be a positive prediction.");
    DMLC_DECLARE_FIELD(variances).set_default(Tuple<float>({0.1f, 0.1f, 0.2f, 0.2f}))
        .describe("Variances to be decoded from box regression output.");
  }
};

struct NonMaximumSuppressionParam : public dmlc::Parameter<NonMaximumSuppressionParam> {
  bool return_indices;
  float iou_threshold;
  bool force_suppress;
  int top_k;
  int id_index;
  int coord_start;
  int score_index;
  int max_output_size;
  bool invalid_to_bottom;
  DMLC_DECLARE_PARAMETER(NonMaximumSuppressionParam) {
    DMLC_DECLARE_FIELD(max_output_size).set_default(-1)
        .describe("Max number of output valid boxes for each instance. "
                  "By default all valid boxes are returned.");
    DMLC_DECLARE_FIELD(iou_threshold).set_default(0.5)
        .describe("Non-maximum suppression threshold.");
    DMLC_DECLARE_FIELD(force_suppress).set_default(false)
        .describe("Suppress all detections regardless of class_id.");
    DMLC_DECLARE_FIELD(top_k).set_default(-1)
        .describe("Keep maximum top k detections before nms, -1 for no limit.");
    DMLC_DECLARE_FIELD(coord_start).set_default(2)
        .describe("Start index of the consecutive 4 coordinates.");
    DMLC_DECLARE_FIELD(score_index).set_default(1)
        .describe("Index of the scores/confidence of boxes.");
    DMLC_DECLARE_FIELD(id_index).set_default(0)
        .describe("Axis index of id.");
    DMLC_DECLARE_FIELD(return_indices).set_default(true)
        .describe("Whether to return box indices in input data.");
    DMLC_DECLARE_FIELD(invalid_to_bottom).set_default(false)
        .describe("Whether to move all invalid bounding boxes to the bottom.");
  }
};
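
Reading the defaults together, each detection is expected as a row of the form [class_id, score, x1, y1, x2, y2, ...], since id_index is 0, score_index is 1, and the four consecutive coordinates start at coord_start = 2. This is the usual SSD-style layout; treat it as an illustration of the defaults rather than a guarantee about every frontend.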

struct LRNParam : public dmlc::Parameter<LRNParam> {
  int size;
  int axis;
  float alpha;
  float beta;
  float bias;

  DMLC_DECLARE_PARAMETER(LRNParam) {
    DMLC_DECLARE_FIELD(size)
        .describe("The size of the local region to be considered for normalization.");
    DMLC_DECLARE_FIELD(axis)
        .describe("The channel axis of the input data layout.");
    DMLC_DECLARE_FIELD(alpha)
        .describe("The scaling parameter.");
    DMLC_DECLARE_FIELD(beta)
        .describe("The exponent parameter.");
    DMLC_DECLARE_FIELD(bias)
        .describe("The offset parameter.");
  }
  // constants
  static const constexpr int kData = 0;
};

struct L2NormalizeParam : public dmlc::Parameter<L2NormalizeParam> {
  float eps;
  Tuple<int> axis;

  DMLC_DECLARE_PARAMETER(L2NormalizeParam) {
    DMLC_DECLARE_FIELD(eps)
        .describe("Float-type epsilon value.");
    DMLC_DECLARE_FIELD(axis)
        .describe("The axes over which the normalization is applied.");
  }
};

}  // namespace top
}  // namespace nnvm

#endif  // NNVM_TOP_NN_H_