mxnet
tensor_blob.h
Go to the documentation of this file.
1 /*
2  * Licensed to the Apache Software Foundation (ASF) under one
3  * or more contributor license agreements. See the NOTICE file
4  * distributed with this work for additional information
5  * regarding copyright ownership. The ASF licenses this file
6  * to you under the Apache License, Version 2.0 (the
7  * "License"); you may not use this file except in compliance
8  * with the License. You may obtain a copy of the License at
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing,
13  * software distributed under the License is distributed on an
14  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15  * KIND, either express or implied. See the License for the
16  * specific language governing permissions and limitations
17  * under the License.
18  */
19 
27 #ifndef MXNET_TENSOR_BLOB_H_
28 #define MXNET_TENSOR_BLOB_H_
29 
30 #include <dmlc/logging.h>
31 #include <dmlc/json.h>
32 #include <dlpack/dlpack.h>
33 #include <vector>
34 #include <iostream>
35 #include <utility>
36 #include <algorithm>
37 #include "./base.h"
38 
39 namespace mxnet {
40 
41 // redefine DLPack enumeration to be backward compatible.
42 constexpr const int kCPU = kDLCPU;
43 constexpr const int kGPU = kDLGPU;
44 // extension type code under TVM function.
45 // Currently NNVM reserved 16 to 19 type code from TVM
46 // 16, 17, 18 is used by NNVM compiler already.
47 // Pick code 19 for MXNet NDArray
48 constexpr const int kTVMNDArrayTypeCode = 19;
49 
50 /* Forward declaration for friend declaration in TBlob */
51 class NDArray;
52 
65 class TBlob {
66  friend class NDArray;
67 
68  public:
70  void* dptr_;
75 
77  TBlob(void) : dptr_(nullptr), type_flag_(mshadow::DataType<real_t>::kFlag) {
78  SetDLTensor(cpu::kDevMask, 0);
79  }
87  template <typename DType>
88  TBlob(DType* dptr, const mxnet::TShape& shape, int dev_mask, int dev_id = -1)
89  : dptr_(dptr), shape_(shape), type_flag_(mshadow::DataType<DType>::kFlag) {
90  SetDLTensor(dev_mask, dev_id);
91  }
100  TBlob(void* dptr, const mxnet::TShape& shape, int dev_mask, int type_flag, int dev_id = -1)
101  : dptr_(dptr), shape_(shape), type_flag_(type_flag) {
102  SetDLTensor(dev_mask, dev_id);
103  }
108  explicit TBlob(const DLTensor& dltensor)
109  : dptr_(dltensor.data),
110  shape_(mxnet::TShape(dltensor.shape, dltensor.shape + dltensor.ndim)),
111  type_flag_(DLDataTypeTransform(dltensor.dtype)),
112  dltensor_(dltensor) {
113  // compactness check for DLTensor
114  if (dltensor.strides != nullptr) {
115  // check strides
116  const int& ndim = dltensor.ndim;
117  const int64_t* shape = dltensor.shape;
118  const int64_t* strides = dltensor.strides;
119  if (ndim >= 1) {
120  bool err = false;
121  if (strides[ndim - 1] != 1) {
122  err = true;
123  } else {
124  for (int i = ndim - 2; i >= 0; --i) {
125  if (strides[i] != shape[i + 1] * strides[i + 1]) {
126  err = true;
127  break;
128  }
129  }
130  }
131  if (err) {
132  LOG(FATAL) << "Unsupported DLPack because MXNet only support compact tensor now";
133  }
134  }
135  }
136  }
144  template <typename Device, int dim, typename DType>
145  TBlob(const mshadow::Tensor<Device, dim, DType>& src) { // NOLINT(*)
146  *this = src;
147  }
152  TBlob(const TBlob& src) : dptr_(src.dptr_), shape_(src.shape_), type_flag_(src.type_flag_) {
153  this->SetDLTensor(src.dev_mask(), src.dev_id());
154  }
163  template <typename Device, int dim, typename DType>
165  dptr_ = src.dptr_;
166  shape_ = src.shape_;
168  SetDLTensor(Device::kDevMask, -1);
169  return *this;
170  }
176  inline TBlob& operator=(const TBlob& src) {
177  dptr_ = src.dptr_;
178  shape_ = src.shape_;
179  type_flag_ = src.type_flag_;
180  SetDLTensor(src.dev_mask(), src.dev_id());
181  return *this;
182  }
186  inline bool CheckContiguous(void) const {
187  return true;
188  }
194  inline TBlob reshape(const mxnet::TShape& shape) const {
195  CHECK_EQ(this->shape_.Size(), shape.Size())
196  << "Shape size mismatch " << this->shape_.Size() << " v.s. " << shape.Size();
197  TBlob ret(this->dptr_, shape, this->dev_mask(), this->type_flag_, this->dev_id());
198  return ret;
199  }
207  template <typename Device, typename DType>
209  mshadow::Stream<Device>* stream = nullptr) const {
210  CHECK(Device::kDevMask == this->dev_mask())
211  << "TBlob.get: device type do not match specified type";
213  << "TBlob.get_with_shape: data type do not match specified type. "
214  << "Expected: " << mshadow::dtype_string(type_flag_) << " v.s. given "
216  return mshadow::Tensor<Device, 2, DType>(static_cast<DType*>(dptr_), shape_.FlatTo2D(), stream);
217  }
225  template <typename Device, typename DType>
227  mshadow::Stream<Device>* stream = nullptr) const {
228  return this->get_with_shape<Device, 1, DType>(mshadow::Shape1(shape_.Size()), stream);
229  }
231  inline int ndim(void) const {
232  return shape_.ndim();
233  }
240  inline index_t size(index_t idx) const {
241  return shape_[idx];
242  }
244  inline size_t Size(void) const {
245  return shape_.Size();
246  }
248  template <typename DType>
249  inline DType* dptr() const {
251  << "TBlob.get_with_shape: data type do not match specified type. "
252  << "Expected: " << mshadow::dtype_string(type_flag_) << " v.s. given "
254  return static_cast<DType*>(dptr_);
255  }
257  inline int dev_mask() const {
258  return dltensor_.ctx.device_type;
259  }
261  inline int dev_id() const {
262  return dltensor_.ctx.device_id;
263  }
268  inline const DLTensor& dltensor() const {
269  return dltensor_;
270  }
271 
281  template <typename Device, int dim, typename DType>
283  CHECK(Device::kDevMask == this->dev_mask())
284  << "TBlob.get: device type do not match specified type";
286  dptr<DType>(), shape_.get<dim>(), shape_[shape_.ndim() - 1], stream);
287  }
298  template <typename Device, int dim, typename DType>
300  const mshadow::Shape<dim>& shape,
301  mshadow::Stream<Device>* stream = nullptr) const {
302  CHECK(Device::kDevMask == this->dev_mask())
303  << "TBlob.get: device type do not match specified type";
304  CHECK_EQ(this->CheckContiguous(), true) << "TBlob.get_reshape: must be contiguous";
305  CHECK_EQ(this->shape_.Size(), static_cast<size_t>(shape.Size()))
306  << "TBlob.get_with_shape: new and old shape do not match total elements";
307  return mshadow::Tensor<Device, dim, DType>(dptr<DType>(), shape, shape[dim - 1], stream);
308  }
318  template <typename Device, typename DType>
320  int axis,
321  mshadow::Stream<Device>* stream = nullptr) const {
322  return this->get_with_shape<Device, 3, DType>(this->shape_.FlatTo3D(axis), stream);
323  }
334  template <typename Device, typename DType>
336  FlatTo3D(int axis_begin, int axis_end, mshadow::Stream<Device>* stream = nullptr) const {
337  return this->get_with_shape<Device, 3, DType>(this->shape_.FlatTo3D(axis_begin, axis_end),
338  stream);
339  }
349  template <typename Device, int dim, typename DType>
351  mshadow::Stream<Device>* stream = nullptr) const {
352  mshadow::Shape<dim> shape;
353  shape[0] = 1;
354  // Pad higher dimensions in case dim > ndim()
355  for (int i = 0; i < dim - ndim(); ++i) {
356  shape[i] = 1;
357  }
358  // Collapse higher dimensions in case dim < ndim()
359  for (int i = 0; i < ndim() - dim + 1; ++i) {
360  shape[0] *= shape_[i];
361  }
362  // Preserve lower dimensions.
363  for (int i = std::max(0, ndim() - dim + 1); i < ndim(); ++i) {
364  shape[i - ndim() + dim] = shape_[i];
365  }
366  return this->get_with_shape<Device, dim, DType>(shape, stream);
367  }
368 
369  private:
370  static DLDataType DTypeTransform(int type_flag) {
371  switch (type_flag) {
372  case mshadow::kFloat32:
373  return DLDataType{kDLFloat, 32, 1};
374  case mshadow::kFloat64:
375  return DLDataType{kDLFloat, 64, 1};
376  case mshadow::kFloat16:
377  return DLDataType{kDLFloat, 16, 1};
378  case mshadow::kBfloat16:
379  return DLDataType{kDLBfloat, 16, 1};
380  case mshadow::kUint8:
381  return DLDataType{kDLUInt, 8, 1};
382  case mshadow::kInt32:
383  return DLDataType{kDLInt, 32, 1};
384  case mshadow::kInt8:
385  return DLDataType{kDLInt, 8, 1};
386  case mshadow::kInt64:
387  return DLDataType{kDLInt, 64, 1};
388  case mshadow::kBool:
389  return DLDataType{kDLUInt, 1, 1};
390  case mshadow::kInt16:
391  return DLDataType{kDLInt, 16, 1};
392  case mshadow::kUint16:
393  return DLDataType{kDLUInt, 16, 1};
394  case mshadow::kUint32:
395  return DLDataType{kDLUInt, 32, 1};
396  case mshadow::kUint64:
397  return DLDataType{kDLUInt, 64, 1};
398  default: {
399  LOG(FATAL) << "Unknown type_flag=" << type_flag;
400  return DLDataType();
401  }
402  }
403  }
404  static int DLDataTypeTransform(DLDataType dldata_type) {
405  if (dldata_type.lanes != 1) {
406  LOG(FATAL) << "Unsupported DLDataType whose lanes != 1";
407  }
408  switch (dldata_type.code) {
409  case kDLFloat:
410  switch (dldata_type.bits) {
411  case 16:
412  return mshadow::kFloat16;
413  case 32:
414  return mshadow::kFloat32;
415  case 64:
416  return mshadow::kFloat64;
417  }
418  break;
419  case kDLBfloat:
420  switch (dldata_type.bits) {
421  case 16:
422  return mshadow::kBfloat16;
423  }
424  break;
425  case kDLUInt:
426  switch (dldata_type.bits) {
427  case 1:
428  return mshadow::kBool;
429  case 8:
430  return mshadow::kUint8;
431  case 16:
432  return mshadow::kUint16;
433  case 32:
434  return mshadow::kUint32;
435  case 64:
436  return mshadow::kUint64;
437  }
438  break;
439  case kDLInt:
440  switch (dldata_type.bits) {
441  case 8:
442  return mshadow::kInt8;
443  case 16:
444  return mshadow::kInt16;
445  case 32:
446  return mshadow::kInt32;
447  case 64:
448  return mshadow::kInt64;
449  }
450  break;
451  }
452  LOG(FATAL) << "Unknown DLDataType{" << dldata_type.code << ", " << dldata_type.bits << ", "
453  << dldata_type.lanes << "}";
454  return mshadow::kFloat32;
455  }
456 
457  inline void SetDLTensor(int dev_mask, int dev_id) {
458  dltensor_.data = dptr_;
459  dltensor_.ctx = DLContext{static_cast<DLDeviceType>(dev_mask), dev_id};
460  dltensor_.ndim = shape_.ndim();
461  dltensor_.dtype = DTypeTransform(type_flag_);
462  dltensor_.shape = shape_.data();
463  dltensor_.strides = nullptr;
464  dltensor_.byte_offset = 0;
465  }
466 
467  private:
469  DLTensor dltensor_;
470 };
471 } // namespace mxnet
472 
473 namespace dmlc {
474 // Add a few patches to support mxnet::TShape in dmlc/parameter.
475 DMLC_DECLARE_TYPE_NAME(mxnet::TShape, "Shape(tuple)");
480 
481 namespace parameter {
482 
483 template <>
484 class FieldEntry<mxnet::TShape> : public FieldEntryBase<FieldEntry<mxnet::TShape>, mxnet::TShape> {
485  public:
486  FieldEntry() : enforce_nonzero_(false), expect_ndim_(0) {}
487  // parent class
488  typedef FieldEntryBase<FieldEntry<mxnet::TShape>, mxnet::TShape> Parent;
489 
490  virtual void Check(void* head) const {
491  Parent::Check(head);
492  mxnet::TShape& v = this->Get(head);
493  if (expect_ndim_ != 0 && v.ndim() != expect_ndim_) {
494  std::ostringstream os;
495  os << "value " << v << "for Parameter " << this->key_
496  << " has wrong dimensions, expected dimension=" << expect_ndim_;
497  throw dmlc::ParamError(os.str());
498  }
499  if (enforce_nonzero_) {
500  for (int i = 0; i < v.ndim(); ++i) {
501  if (v[i] == 0U) {
502  std::ostringstream os;
503  os << "value " << v << "for Parameter " << this->key_
504  << " is invalid, the input shape must be nonzero in all dimensions";
505  throw dmlc::ParamError(os.str());
506  }
507  }
508  }
509  }
511  this->enforce_nonzero_ = true;
512  return this->self();
513  }
515  expect_ndim_ = ndim;
516  return this->self();
517  }
518 
519  private:
520  // whether all the entries need to be nonzero
521  bool enforce_nonzero_;
522  // expected number of dimension, default = 0 means no restriction.
523  int expect_ndim_;
524 };
525 
526 } // namespace parameter
527 } // namespace dmlc
528 
529 #endif // MXNET_TENSOR_BLOB_H_
mxnet
namespace of mxnet
Definition: api_registry.h:33
kDLFloat
@ kDLFloat
Definition: dlpack.h:82
mxnet::kCPU
constexpr const int kCPU
Definition: tensor_blob.h:42
mxnet::TBlob::TBlob
TBlob(const mshadow::Tensor< Device, dim, DType > &src)
constructor from tensor
Definition: tensor_blob.h:145
DLDataType
The data type the tensor can hold.
Definition: dlpack.h:94
mshadow::Shape::Size
MSHADOW_XINLINE index_t Size(void) const
Definition: tensor.h:158
dmlc::parameter::FieldEntry< mxnet::TShape >::FieldEntry
FieldEntry()
Definition: tensor_blob.h:486
mshadow::Stream
computation stream structure, used for asynchronous computations
Definition: tensor.h:488
mxnet::Tuple
A dynamic sized array data structure that is optimized for storing small number of elements with same...
Definition: tuple.h:57
mshadow::kUint16
@ kUint16
Definition: base.h:361
mxnet::TBlob::TBlob
TBlob(void)
default constructor, default copy assign will work
Definition: tensor_blob.h:77
mshadow::kUint64
@ kUint64
Definition: base.h:363
mxnet::TBlob::dptr
DType * dptr() const
get pointer in dtype
Definition: tensor_blob.h:249
DLTensor::data
void * data
The opaque data pointer points to the allocated data. This will be CUDA device pointer or cl_mem hand...
Definition: dlpack.h:132
nnvm::Tuple
A dynamic sized array data structure that is optimized for storing small number of elements with same...
Definition: tuple.h:52
dmlc
namespace for dmlc
Definition: array_view.h:12
DLContext::device_type
DLDeviceType device_type
The device type used in the device.
Definition: dlpack.h:71
dmlc::parameter::FieldEntry< mxnet::TShape >::Check
virtual void Check(void *head) const
Definition: tensor_blob.h:490
dmlc::parameter::FieldEntry< mxnet::TShape >
Definition: tensor_blob.h:484
mshadow::kInt8
@ kInt8
Definition: base.h:357
mshadow::kUint32
@ kUint32
Definition: base.h:362
mxnet::TBlob::TBlob
TBlob(void *dptr, const mxnet::TShape &shape, int dev_mask, int type_flag, int dev_id=-1)
constructor that construct TBlob from contiguous memory
Definition: tensor_blob.h:100
DLTensor::ndim
int ndim
Number of dimensions.
Definition: dlpack.h:136
mxnet::TBlob::CheckContiguous
bool CheckContiguous(void) const
Definition: tensor_blob.h:186
mxnet::TBlob::FlatTo2D
mshadow::Tensor< Device, 2, DType > FlatTo2D(mshadow::Stream< Device > *stream=nullptr) const
flatten the tensor to 2 dimension, collapse the higher dimensions together
Definition: tensor_blob.h:208
DLContext
A Device context for Tensor and operator.
Definition: dlpack.h:69
kDLCPU
@ kDLCPU
CPU device.
Definition: dlpack.h:40
DLTensor::byte_offset
uint64_t byte_offset
The offset in bytes to the beginning pointer to data.
Definition: dlpack.h:147
mshadow::Tensor
general tensor
Definition: tensor.h:525
mxnet::TBlob::dptr_
void * dptr_
pointer to the data
Definition: tensor_blob.h:70
mxnet::TBlob::Size
size_t Size(void) const
total number of elements in the tensor
Definition: tensor_blob.h:244
mshadow::cpu::kDevMask
static const int kDevMask
device flag number, identifies this device
Definition: tensor.h:43
mxnet::TBlob::dev_id
int dev_id() const
device index of the corresponding device
Definition: tensor_blob.h:261
DLDeviceType
DLDeviceType
The device type in DLContext.
Definition: dlpack.h:38
mxnet::Tuple::ndim
int ndim() const
Definition: tuple.h:217
dmlc::DMLC_DECLARE_TYPE_NAME
DMLC_DECLARE_TYPE_NAME(optional< int >, "int or None")
description for optional int
mshadow::kBool
@ kBool
Definition: base.h:359
mxnet::TBlob::get_with_shape
mshadow::Tensor< Device, dim, DType > get_with_shape(const mshadow::Shape< dim > &shape, mshadow::Stream< Device > *stream=nullptr) const
fetch a tensor in given shape If size do not match the stored size, an error will be issued
Definition: tensor_blob.h:299
mshadow::kFloat64
@ kFloat64
Definition: base.h:353
mxnet::TBlob::operator=
TBlob & operator=(const TBlob &src)
assignment from TBlob (copy assignment)
Definition: tensor_blob.h:176
mxnet::TBlob::dev_mask
int dev_mask() const
device mask of the corresponding device
Definition: tensor_blob.h:257
mxnet::TBlob::ndim
int ndim(void) const
return number of dimension of the tensor inside
Definition: tensor_blob.h:231
mshadow::kInt16
@ kInt16
Definition: base.h:360
DLTensor::strides
int64_t * strides
strides of the tensor (in number of elements, not bytes) can be NULL, indicating tensor is compact an...
Definition: dlpack.h:145
mxnet::TBlob::type_flag_
int type_flag_
type flag of the tensor blob
Definition: tensor_blob.h:74
mxnet::TBlob::TBlob
TBlob(DType *dptr, const mxnet::TShape &shape, int dev_mask, int dev_id=-1)
constructor that construct TBlob from contiguous memory
Definition: tensor_blob.h:88
mxnet::TBlob::get
mshadow::Tensor< Device, dim, DType > get(mshadow::Stream< Device > *stream=nullptr) const
fetch the tensor, with respect to specific dimension if dim do not match the stored dimension,...
Definition: tensor_blob.h:282
mxnet::TBlob::operator=
TBlob & operator=(const mshadow::Tensor< Device, dim, DType > &src)
assignment from tensor
Definition: tensor_blob.h:164
mxnet::TBlob::FlatToKD
mshadow::Tensor< Device, dim, DType > FlatToKD(mshadow::Stream< Device > *stream=nullptr) const
flatten the tensor to specified number of dimensions, collapse the highest dimensions or pad with hig...
Definition: tensor_blob.h:350
kDLGPU
@ kDLGPU
CUDA GPU device.
Definition: dlpack.h:42
DLDataType::code
uint8_t code
Type code of base types. We keep it uint8_t instead of DLDataTypeCode for minimal memory footprint,...
Definition: dlpack.h:100
mshadow::Tensor::shape_
Shape< dimension > shape_
shape of the tensor
Definition: tensor.h:541
mxnet::NDArray
ndarray interface
Definition: ndarray.h:82
mshadow::kInt64
@ kInt64
Definition: base.h:358
DLDataType::bits
uint8_t bits
Number of bits, common choices are 8, 16, 32.
Definition: dlpack.h:104
mshadow::DataType
Definition: base.h:368
kDLInt
@ kDLInt
Definition: dlpack.h:80
mxnet::TBlob
tensor blob class that can be used to hold tensor of any dimension, any device and any data type,...
Definition: tensor_blob.h:65
DLTensor::ctx
DLContext ctx
The device context of the tensor.
Definition: dlpack.h:134
mshadow::kInt32
@ kInt32
Definition: base.h:356
dmlc::parameter::FieldEntry< mxnet::TShape >::set_expect_ndim
FieldEntry< mxnet::TShape > & set_expect_ndim(int ndim)
Definition: tensor_blob.h:514
mxnet::TBlob::FlatTo3D
mshadow::Tensor< Device, 3, DType > FlatTo3D(int axis_begin, int axis_end, mshadow::Stream< Device > *stream=nullptr) const
flatten the tensor to 3 dimension, collapse the dimension: [0, axis_begin), [axis_begin,...
Definition: tensor_blob.h:336
mxnet::TBlob::size
index_t size(index_t idx) const
return size of i-th dimension, start counting from highest dimension. return type needs to be a signe...
Definition: tensor_blob.h:240
DLTensor::dtype
DLDataType dtype
The data type of the pointer.
Definition: dlpack.h:138
mxnet::TBlob::dltensor
const DLTensor & dltensor() const
return the corresponding DLTensor
Definition: tensor_blob.h:268
kDLUInt
@ kDLUInt
Definition: dlpack.h:81
mshadow::dtype_string
std::string dtype_string(const int dtype)
Definition: base.h:1811
mxnet::TShape::Size
size_t Size() const
Definition: tuple.h:523
mxnet::TShape::data
const dim_t * data() const
Definition: tuple.h:552
mshadow::Tensor< Device, 1, DType >
Definition: tensor.h:673
mshadow
overloaded + operator between half_t and bf16_t
Definition: base.h:319
mxnet::TBlob::TBlob
TBlob(const DLTensor &dltensor)
constructor that construct TBlob from DLTensor
Definition: tensor_blob.h:108
DLTensor::shape
int64_t * shape
The shape of the tensor.
Definition: dlpack.h:140
dmlc::parameter::FieldEntry< mxnet::TShape >::enforce_nonzero
FieldEntry< mxnet::TShape > & enforce_nonzero()
Definition: tensor_blob.h:510
mxnet::kTVMNDArrayTypeCode
constexpr const int kTVMNDArrayTypeCode
Definition: tensor_blob.h:48
mxnet::index_t
mshadow::index_t index_t
index type usually use unsigned
Definition: base.h:81
mshadow::Shape< dim >
mshadow::kUint8
@ kUint8
Definition: base.h:355
mshadow::Tensor::dptr_
DType * dptr_
pointer to the data
Definition: tensor.h:539
mshadow::kBfloat16
@ kBfloat16
Definition: base.h:364
mxnet::TBlob::TBlob
TBlob(const TBlob &src)
constructor from TBlob (copy constructor)
Definition: tensor_blob.h:152
mxnet::TBlob::FlatTo1D
mshadow::Tensor< Device, 1, DType > FlatTo1D(mshadow::Stream< Device > *stream=nullptr) const
flatten the tensor to 1 dimension, collapse all the dimensions together.
Definition: tensor_blob.h:226
mxnet::TBlob::FlatTo3D
mshadow::Tensor< Device, 3, DType > FlatTo3D(int axis, mshadow::Stream< Device > *stream=nullptr) const
flatten the tensor to 3 dimension, collapse the dimension before and after specified axis.
Definition: tensor_blob.h:319
json.h
Lightweight JSON Reader/Writer that read save into C++ data structs. This includes STL composites and...
mshadow::Shape1
MSHADOW_XINLINE Shape< 1 > Shape1(index_t s0)
construct a one dimension shape, stride will equal s0
Definition: tensor.h:220
mxnet::TBlob::shape_
mxnet::TShape shape_
shape of the tensor
Definition: tensor_blob.h:72
mxnet::TShape
A Shape class that is used to represent shape of each tensor.
Definition: tuple.h:440
dmlc::parameter::FieldEntry< mxnet::TShape >::Parent
FieldEntryBase< FieldEntry< mxnet::TShape >, mxnet::TShape > Parent
Definition: tensor_blob.h:488
DLContext::device_id
int device_id
The device index.
Definition: dlpack.h:73
DLDataType::lanes
uint16_t lanes
Number of lanes in the type, used for vector types.
Definition: dlpack.h:106
DLTensor
Plain C Tensor object, does not manage memory.
Definition: dlpack.h:112
mxnet::TBlob::reshape
TBlob reshape(const mxnet::TShape &shape) const
reshape to shape
Definition: tensor_blob.h:194
dlpack.h
The common header of DLPack.
mxnet::kGPU
constexpr const int kGPU
Definition: tensor_blob.h:43
mshadow::kFloat16
@ kFloat16
Definition: base.h:354
dmlc::optional
c++17 compatible optional class.
Definition: optional.h:43
base.h
configuration of MXNet as well as basic data structure.
mxnet::real_t
mshadow::default_real_t real_t
data type that will be used to store ndarray
Definition: base.h:85
mshadow::kFloat32
@ kFloat32
Definition: base.h:352
kDLBfloat
@ kDLBfloat
Definition: dlpack.h:83