ndarray.h
1 /*
2  * Licensed to the Apache Software Foundation (ASF) under one
3  * or more contributor license agreements. See the NOTICE file
4  * distributed with this work for additional information
5  * regarding copyright ownership. The ASF licenses this file
6  * to you under the Apache License, Version 2.0 (the
7  * "License"); you may not use this file except in compliance
8  * with the License. You may obtain a copy of the License at
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing,
13  * software distributed under the License is distributed on an
14  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15  * KIND, either express or implied. See the License for the
16  * specific language governing permissions and limitations
17  * under the License.
18  */
19 
20 /*!
21  * \file ndarray.h
22  * \brief NDArray interface that handles array arithmetics.
23  */
24 #ifndef MXNET_NDARRAY_H_
25 #define MXNET_NDARRAY_H_
26 
27 #include <dmlc/base.h>
28 #include <dmlc/io.h>
29 #include <dmlc/logging.h>
30 #include <dmlc/registry.h>
31 #include <dmlc/type_traits.h>
32 #include <nnvm/node.h>
33 
34 #include <algorithm>
35 #include <map>
36 #include <memory>
37 #include <string>
38 #include <vector>
39 #include "./base.h"
40 #include "./engine.h"
41 #include "./storage.h"
42 // check c++11
43 #if DMLC_USE_CXX11 == 0
44 #error "C++11 is required for the ndarray module"
45 #endif
46 
47 namespace dnnl {
48 struct memory;
49 } // namespace dnnl
50 
51 namespace mxnet {
52 // enum for storage types
53 namespace csr {
54 enum CSRAuxType { kIndPtr, kIdx };
55 }
56 
57 namespace rowsparse {
58 enum RowSparseAuxType { kIdx };
59 }
60 
61 enum NDArrayStorageType {
62  kUndefinedStorage = -1, // undefined storage
63  kDefaultStorage, // dense
64  kRowSparseStorage, // row sparse
65  kCSRStorage, // csr
66 };
67 
68 enum NDArrayFormatErr {
69  kNormalErr, // normal
70  kCSRShapeErr, // shape mismatch for csr
71  kCSRIndPtrErr, // indptr error for csr
72  kCSRIdxErr, // idx error for csr
73  kRSPShapeErr, // shape mismatch for row sparse
74  kRSPIdxErr, // indices error for row sparse
75 };
76 
77 class DNNLMemory;
78 
82 class NDArray {
83  public:
85  NDArray() : autograd_entry_(nullptr) {}
93  NDArray(const mxnet::TShape& shape,
94  Context ctx,
95  bool delay_alloc = false,
96  int dtype = mshadow::default_type_flag)
97  : ptr_(std::make_shared<Chunk>(shape, ctx, delay_alloc, dtype)),
98  shape_(shape),
99  dtype_(dtype),
100  storage_type_(kDefaultStorage),
101  autograd_entry_(nullptr) {}
104  NDArray(const NDArrayStorageType stype,
105  const mxnet::TShape& shape,
106  Context ctx,
107  bool delay_alloc = true,
108  int dtype = mshadow::default_type_flag,
109  const std::vector<int>& aux_types = {},
110  const mxnet::ShapeVector& aux_shapes = {},
111  const mxnet::TShape& storage_shape = mxnet::TShape(mshadow::Shape1(0))) {
112  ReInit(stype, shape, ctx, dtype, delay_alloc, &aux_types, &aux_shapes, &storage_shape);
113  }
120  explicit NDArray(Context ctx, int dtype = mshadow::default_type_flag)
121  : ptr_(std::make_shared<Chunk>(mxnet::TShape(mshadow::Shape1(0)), ctx, true, dtype)),
122  shape_(),
123  dtype_(dtype),
124  storage_type_(kDefaultStorage),
125  autograd_entry_(nullptr) {}
133  NDArray(const TBlob& data, int dev_id)
134  : ptr_(std::make_shared<Chunk>(data, dev_id)),
135  shape_(data.shape_),
136  dtype_(data.type_flag_),
137  storage_type_(kDefaultStorage),
138  autograd_entry_(nullptr) {}
139 
148  NDArray(const TBlob& data, int dev_id, const std::function<void()>& deleter)
149  : ptr_(new Chunk(data, dev_id),
150  [deleter](Chunk* p) {
151  deleter(); // call custom deleter
152  delete p; // delete Chunk object
153  }),
154  shape_(data.shape_),
155  dtype_(data.type_flag_),
156  storage_type_(kDefaultStorage),
157  autograd_entry_(nullptr) {}
158 
160  NDArray(int shared_pid, int shared_id, const mxnet::TShape& shape, int dtype)
161  : ptr_(std::make_shared<Chunk>(shared_pid, shared_id, shape, dtype)),
162  shape_(shape),
163  dtype_(dtype),
164  storage_type_(kDefaultStorage),
165  autograd_entry_(nullptr) {}
166 
177  NDArray(const NDArrayStorageType stype,
178  const mxnet::TShape& shape,
179  const TBlob& data,
180  const std::vector<TBlob>& aux_data,
181  int dev_id)
182  : ptr_(std::make_shared<Chunk>(stype, data, aux_data, dev_id)),
183  shape_(shape),
184  dtype_(data.type_flag_),
185  storage_type_(stype),
186  autograd_entry_(nullptr) {}
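 // -------------------------------------------------------------------------
 // Usage sketch (illustrative only, not part of the upstream header): how the
 // constructors above are typically called. Shapes and dtypes are arbitrary
 // example values.
 //
 //   mxnet::NDArray dense(mxnet::TShape({2, 3}), mxnet::Context::CPU());
 //   mxnet::NDArray lazy(mxnet::TShape({2, 3}), mxnet::Context::CPU(),
 //                       /*delay_alloc=*/true);      // storage allocated on first use
 //   mxnet::NDArray csr_arr(mxnet::kCSRStorage, mxnet::TShape({4, 4}),
 //                          mxnet::Context::CPU());  // aux types/shapes defaulted
 // -------------------------------------------------------------------------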
191  void Init(const mxnet::TShape& shape) {
192  ptr_->Init(shape, this->dtype_);
193  this->shape_ = shape;
194  }
195 
196  void InitDetached(const NDArray* src) {
197  *this = *src;
198  autograd_entry_ = nnvm::NodeEntry(nullptr);
199  }
200  inline void ReInit() {
201  ptr_ = nullptr;
202  Init(kUndefinedStorage, TShape(), -1);
203  }
204  void ReInit(const NDArrayStorageType stype,
205  const mxnet::TShape& shape,
206  Context ctx,
207  int dtype,
208  bool delay_alloc = true,
209  const std::vector<int>* aux_types = nullptr,
210  const mxnet::ShapeVector* aux_shapes = nullptr,
211  const mxnet::TShape* storage_shape = nullptr);
212 
213  void SelfReorder2Default();
217  void SetShapeFromChunk() const;
218  /*
219  * This indicates whether an array is a view of another array (created by
220  * reshape or slice). If an array is a view and the data is stored in
221  * DNNL format, we need to convert the data to the default format when
222  * data in the view is accessed.
223  */
224  inline bool IsView() const {
225  // View only works on the default storage
226  if (storage_type() != kDefaultStorage)
227  return false;
228  // If the array reuses memory, its shape may be different from the storage
229  // shape. However, we shouldn't consider it as a view.
230  if (reuse_)
231  return false;
232  return byte_offset_ > 0 || shape() != ptr_->storage_shape;
233  }
234 
235  /* \brief Check whether the two arrays are the same array */
236  inline bool IsSame(const NDArray& other) const {
237  return ptr_ == other.ptr_ && shape_ == other.shape_ && byte_offset_ == other.byte_offset_ &&
238  dtype_ == other.dtype_;
239  }
240 
244  inline const mxnet::TShape& shape() const {
245  return shape_;
246  }
252  inline const mxnet::TShape& storage_shape() const {
253  CHECK(ptr_ != nullptr);
254  CHECK_NE(storage_type(), kDefaultStorage)
255  << "storage_shape() is not intended for kDefaultStorage.";
256  return ptr_->storage_shape;
257  }
258 
264  inline const mxnet::TShape& aux_shape(size_t index) const {
265  CHECK_NE(storage_type(), kDefaultStorage) << "aux_shape() is not intended for kDefaultStorage.";
266  return ptr_->aux_shapes[index];
267  }
268 
269  /* \return the shapes of all aux data */
270  inline const mxnet::ShapeVector& aux_shapes() const {
271  CHECK_NE(storage_type(), kDefaultStorage)
272  << "aux_shapes() is not intended for kDefaultStorage.";
273  return ptr_->aux_shapes;
274  }
275 
277  const std::vector<int>& aux_types() const {
278  CHECK_NE(storage_type(), kDefaultStorage) << "aux_types() is not intended for kDefaultStorage.";
279  return ptr_->aux_types;
280  }
281 
289  inline void set_aux_shape(size_t index, const mxnet::TShape& shape) const {
290  CHECK_NE(storage_type(), kDefaultStorage)
291  << "set_aux_shape() is not intended for kDefaultStorage.";
292  ptr_->set_aux_shape(index, shape);
293  }
294 
298  inline const TBlob& data() const {
299  if (storage_type() == kDefaultStorage)
300  CheckAndAlloc();
301  SetTBlob();
302  return tblob_;
303  }
307  NDArray grad() const;
308 
312  inline TBlob aux_data(size_t i) const {
313  auto stype = storage_type();
314  TBlob res;
315  auto shape = aux_shape(i);
316  auto type = aux_type(i);
317  MSHADOW_TYPE_SWITCH(type, DType, {
318  auto dptr = static_cast<DType*>(ptr_->aux_handles[i].dptr);
319  CHECK(stype == kRowSparseStorage || stype == kCSRStorage)
320  << "Unexpected storage type: " << stype;
321  res = TBlob(dptr, shape, ptr_->aux_handles[i].ctx.dev_mask(), type);
322  });
323  return res;
324  }
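 // Usage sketch (illustrative only): pulling apart a CSR array with data() and
 // aux_data(); the index enums come from the csr namespace above, and csr_arr
 // is the example array from the constructor sketch.
 //
 //   const mxnet::TBlob& values = csr_arr.data();                  // non-zero values
 //   mxnet::TBlob indptr  = csr_arr.aux_data(mxnet::csr::kIndPtr); // row pointers
 //   mxnet::TBlob indices = csr_arr.aux_data(mxnet::csr::kIdx);    // column indices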
328  inline Context ctx() const {
329  CHECK(!is_none());
330  return ptr_->shandle.ctx;
331  }
335  inline int dtype() const {
336  return dtype_;
337  }
338  inline int aux_type(size_t i) const {
339  CHECK(!is_none());
340  return ptr_->aux_types[i];
341  }
342 
343  inline NDArrayStorageType storage_type() const {
344  return storage_type_;
345  }
347  inline bool is_none() const {
348  return ptr_.get() == nullptr;
349  }
351  bool fresh_out_grad() const;
353  void set_fresh_out_grad(bool state) const;
358  inline bool storage_initialized() const {
359  if (is_none())
360  return false;
361  auto stype = storage_type();
362  CHECK_NE(stype, kDefaultStorage)
363  << "storage_initialized() is not intended for kDefaultStorage.";
364  if (stype == kRowSparseStorage) {
365  CHECK_EQ(aux_shape(rowsparse::kIdx)[0], storage_shape()[0])
366  << "inconsistent storage shape " << storage_shape() << " vs. aux shape "
367  << aux_shape(rowsparse::kIdx);
368  return aux_shape(rowsparse::kIdx).Size() != 0;
369  } else if (stype == kCSRStorage) {
370  CHECK_EQ(aux_shape(csr::kIdx)[0], storage_shape()[0])
371  << "inconsistent storage shape " << storage_shape() << " vs. aux shape "
372  << aux_shape(csr::kIdx);
373  return aux_shape(csr::kIdx).Size() != 0;
374  } else {
375  LOG(FATAL) << "Unknown storage type";
376  }
377  return true;
378  }
380  inline Storage::Handle storage_handle() const {
381  CHECK(!is_none());
382  CHECK_EQ(storage_type(), kDefaultStorage);
383  CheckAndAlloc();
384  return ptr_->shandle;
385  }
387  void AssignStorageInfo(const std::string& profiler_scope, const std::string& name);
395  void WaitToRead() const;
403  void WaitToWrite() const;
409  void StreamSync(int stream) const;
411  inline Engine::VarHandle var() const {
412  return ptr_->var;
413  }
415  inline size_t byte_offset() const {
416  return byte_offset_;
417  }
419  inline size_t version() const {
420  return var()->version();
421  }
426  void Save(dmlc::Stream* strm) const;
432  bool LegacyLoad(dmlc::Stream* strm, const uint32_t magic);
438  bool Load(dmlc::Stream* strm);
451  NDArray& operator+=(const NDArray& src);
458  NDArray& operator+=(const real_t& src);
465  NDArray& operator-=(const NDArray& src);
472  NDArray& operator-=(const real_t& src);
479  NDArray& operator*=(const NDArray& src);
486  NDArray& operator*=(const real_t& src);
493  NDArray& operator/=(const NDArray& src);
500  NDArray& operator/=(const real_t& src);
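 // Usage sketch (illustrative only): the compound-assignment operators above
 // schedule asynchronous, mutating element-wise ops on the engine; a and b are
 // assumed to be compatible dense NDArrays.
 //
 //   a += b;          // elementwise add of another NDArray
 //   a *= 0.5f;       // elementwise scale by a real_t scalar
 //   a.WaitToRead();  // block until the pending writes complete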
506  NDArray Copy(Context ctx) const;
517  void SyncCopyFromCPU(const void* data, size_t size) const;
518 
522  void SyncCopyFromNDArray(const NDArray& src, int i = -1, int j = -1);
523 
534  void SyncCopyToCPU(void* data, size_t size) const;
540  void SyncCheckFormat(const bool full_check) const;
547  NDArray Slice(index_t begin, index_t end) const;
560  NDArray At(index_t idx) const;
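 // Usage sketch (illustrative only): Slice and At return views over the same
 // storage chunk rather than copies (see IsView above).
 //
 //   mxnet::NDArray rows = dense.Slice(0, 2);  // view of rows [0, 2)
 //   mxnet::NDArray row0 = dense.At(0);        // view of row 0, leading axis dropped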
571  NDArray aux_ndarray(size_t i) const;
572 
577  NDArray data_ndarray() const;
578 
586  inline NDArray AsArray(const mxnet::TShape& shape, int dtype) const {
587  CHECK_EQ(storage_type(), kDefaultStorage) << "AsArray is intended only for kDefaultStorage.";
588  CHECK_GE(ptr_->shandle.size, shape.Size() * mshadow::mshadow_sizeof(dtype))
589  << "NDArray.AsArray: target memory size is bigger than what was allocated.";
590  // We can't reuse memory in a view.
591  CHECK(!IsView());
592  NDArray ret = *this;
593  ret.shape_ = shape;
594  ret.dtype_ = dtype;
595  ret.reuse_ = true;
596  return ret;
597  }
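 // Usage sketch (illustrative only): AsArray reinterprets an existing chunk
 // under a different shape/dtype without copying, provided the new view needs
 // no more bytes than were allocated.
 //
 //   mxnet::NDArray flat = dense.AsArray(mxnet::TShape({6}), dense.dtype());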
598 
599  inline void InitAsArray(const NDArray& src, const mxnet::TShape& shape, int dtype) {
600  CHECK_EQ(src.storage_type(), kDefaultStorage)
601  << "AsArray is intended only for kDefaultStorage.";
602  CHECK_GE(src.ptr_->shandle.size, shape.Size() * mshadow::mshadow_sizeof(dtype))
603  << "NDArray.AsArray: target memory size is bigger than what was allocated.";
604  // We can't reuse memory in a view.
605  CHECK(!src.IsView());
606  *this = src;
607  shape_ = shape;
608  dtype_ = dtype;
609  reuse_ = true;
610  }
611 
617  DLManagedTensor* ToDLPack() const;
618 
630  static NDArray FromDLPack(const DLManagedTensor* tensor, bool transient_handle);
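 // Usage sketch (illustrative only): round-tripping through DLPack for
 // zero-copy exchange with other frameworks.
 //
 //   DLManagedTensor* dl = dense.ToDLPack();
 //   mxnet::NDArray back = mxnet::NDArray::FromDLPack(dl, /*transient_handle=*/false);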
631 
639  inline void SparseUpdateChunk(const NDArray& arr) const {
640  CHECK(shape_ == arr.shape_) << "ndarray shape is different from the target";
641  CHECK(dtype_ == arr.dtype_) << "ndarray dtype is different from the target";
642  auto stype = arr.storage_type();
643  CHECK(stype == kCSRStorage || stype == kRowSparseStorage)
644  << "Only to be used with CSR and RSP storage types";
645  // swap shandles between src and dst
646  Storage::Handle shandle_dst = arr.ptr_->shandle;
647  arr.ptr_->shandle = ptr_->shandle;
648  ptr_->shandle = shandle_dst;
649 
650  ptr_->storage_shape = arr.ptr_->storage_shape;
651  ptr_->storage_type = arr.ptr_->storage_type;
652  ptr_->ctx = arr.ptr_->ctx;
653 
654  // swap aux_handles between src and dst
655  size_t aux_idx = 0;
656  CHECK(ptr_->aux_handles.size() == arr.ptr_->aux_handles.size())
657  << "ndarray number of aux_handles is different from target";
658  for (auto& aux_handle : arr.ptr_->aux_handles) {
659  Storage::Handle aux_dst = ptr_->aux_handles[aux_idx];
660  ptr_->aux_handles[aux_idx] = aux_handle;
661  aux_handle = aux_dst;
662  aux_idx++;
663  }
664  ptr_->aux_types = arr.ptr_->aux_types;
665  ptr_->aux_shapes = arr.ptr_->aux_shapes;
666  }
667 
673  NDArray Reshape(const mxnet::TShape& shape) const;
684  NDArray Detach() const {
685  NDArray ret(*this);
686  ret.autograd_entry_ = nnvm::NodeEntry(nullptr);
687  ret.deferredcompute_entry_ = nnvm::NodeEntry(nullptr);
688  return ret;
689  }
690 
696  inline void CheckAndAlloc() const {
697  CHECK_EQ(storage_type(), kDefaultStorage);
698  ptr_->CheckAndAlloc();
699  }
700 
710  inline void ReshapeAndAlloc(const mxnet::TShape& shape) {
711  CHECK_EQ(storage_type(), kDefaultStorage);
712  CHECK(!is_none());
713  shape_ = shape;
714  ptr_->CheckAndAlloc(shape.Size() * mshadow::mshadow_sizeof(dtype_));
715  }
716 
717  /*!
718  * \brief Alloc memory for non-default storage
719  * aux_shape is only known at run time
720  */
721  inline void CheckAndAlloc(const mxnet::ShapeVector& aux_shapes) const {
722  CHECK_NE(storage_type(), kDefaultStorage)
723  << "CheckAndAlloc(aux_shapes) is not intended for kDefaultStorage";
724  ptr_->CheckAndAlloc(shape_, aux_shapes, dtype_);
725  }
726  inline void CheckAndAllocData(const mxnet::TShape& storage_shape) const {
727  CHECK_NE(storage_type(), kDefaultStorage)
728  << "CheckAndAllocData is not intended for kDefaultStorage";
729  ptr_->CheckAndAllocData(storage_shape, dtype_);
730  }
731  inline void CheckAndAllocAuxData(size_t i, const mxnet::TShape& aux_shape) const {
732  CHECK_NE(storage_type(), kDefaultStorage)
733  << "CheckAndAllocAuxData is not intended for kDefaultStorage";
734  ptr_->CheckAndAllocAuxData(i, aux_shape);
735  }
736 
737 #if MXNET_USE_ONEDNN == 1
738  /*
739  * Create NDArray from dnnl memory.
740  * dnnl_mem The dnnl memory to be managed.
741  */
742  explicit NDArray(const std::shared_ptr<dnnl::memory>& dnnl_mem);
743  /*
744  * Create NDArray from a dnnl memory descriptor.
745  * md: the dnnl memory descriptor of the array to be created.
746  */
747  explicit NDArray(const void* md);
748  /*
749  * Test if the data is stored in one of the special DNNL formats.
750  */
751  bool IsDNNLData() const {
752  return ptr_->IsDNNL();
753  }
754  /*
755  * Test if the data is stored in one of the default MXNet formats.
756  */
757  bool IsDefaultData() const {
758  return ptr_->IsDefault();
759  }
760  /*
761  * All functions below return a raw pointer to dnnl memory. Behind it there
762  * is a shared pointer that holds the memory, either in the NDArray or in the
763  * DNNL stream. As long as we call these functions inside an operator, the
764  * returned memory is always valid.
765  */
766 
767  /*
768  * This function returns dnnl::memory with the default primitive_desc.
769  */
770  const dnnl::memory* GetDNNLData() const;
771  /*
772  * This function returns dnnl::memory with the given primitive_desc
773  * as long as the array size meets the required size in the given primitive_desc.
774  */
775  const dnnl::memory* GetDNNLData(const void* md) const;
776  /*
777  * This function returns dnnl::memory with the given primitive_desc.
778  * The returned dnnl::memory will have the same physical layout as
779  * the given primitive_desc.
780  */
781  const dnnl::memory* GetDNNLDataReorder(const void* md) const;
782 
783  /*
784  * This function copies data from dnnl memory.
785  */
786  void CopyFrom(const dnnl::memory& mem);
787  /*
788  * This function allocates memory for array and creates dnnl memory
789  * with the specified format.
790  */
791  dnnl::memory* CreateDNNLData(const void* md);
792 
793  /*
794  * These are the async versions of the methods above.
795  * They change the layout of this NDArray, but only after all pending accesses
796  * to the array are complete.
797  */
798  void Reorder2DefaultAsync() const;
799  void DNNLDataReorderAsync(const void* md) const;
800 
801  /*
802  * This creates a new NDArray with the reordered data.
803  * It doesn't affect the data of the original NDArray.
804  */
805  NDArray Reorder2Default() const;
806 
807  /*
808  * This creates a new NDArray using f32 with the reordered data.
809  * It doesn't affect the data of the original NDArray.
810  */
811  NDArray Reorder2DefaultFloatFormat() const;
812 
813  void InvalidateDNNLData();
814 
815  /*
816  * This function is used inside operators to reshape an array.
817  * It doesn't change the layout of the original array; it allocates memory from
818  * the temporary buffer, so the returned array is only valid inside the current
819  * invocation of this operator.
820  * This is different from Reshape, which converts the data in the array to the
821  * default layout and allocates memory from malloc directly, which can be
822  * expensive.
823  * It's used by FullyConnected right now.
824  */
825  NDArray DNNLDataReshape(const mxnet::TShape& shape) const;
826 
830  void UpdateDNNLMemDesc(const void* desc);
831 #endif
832 
839  static void Save(dmlc::Stream* fo,
840  const std::vector<NDArray>& data,
841  const std::vector<std::string>& names);
848  static void Load(dmlc::Stream* fi, std::vector<NDArray>* data, std::vector<std::string>* keys);
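 // Usage sketch (illustrative only): serializing named arrays with the static
 // Save/Load pair. "params.bin" is an arbitrary example path; arrays and names
 // are assumed to be a std::vector<NDArray> and std::vector<std::string>.
 //
 //   std::unique_ptr<dmlc::Stream> fo(dmlc::Stream::Create("params.bin", "w"));
 //   mxnet::NDArray::Save(fo.get(), arrays, names);
 //
 //   std::unique_ptr<dmlc::Stream> fi(dmlc::Stream::Create("params.bin", "r"));
 //   mxnet::NDArray::Load(fi.get(), &arrays, &names);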
849 
850  private:
851  friend class Imperative;
853  // shandle is used to store the actual values in the NDArray
854  // aux_handles store the aux data (such as indices) if it's needed by non-default storage.
855  struct Chunk {
859  Storage::Handle shandle;
864  std::vector<Storage::Handle> aux_handles;
865 
866 #if MXNET_USE_ONEDNN == 1
867 
869  std::shared_ptr<DNNLMemory> dnnl_mem_;
870 #endif
871 
872  /*! \brief variable from engine */
873  Engine::VarHandle var;
874  /*!
875  * \brief if this is true, this means the data do not come
876  * from Storage, and do not need to be freed
877  */
878  bool static_data;
879  /*! \brief whether data allocation is delayed. This doesn't indicate whether aux data
880  allocation is delayed. */
881  bool delay_alloc;
882  // the type of the storage. The storage_type is never kUndefinedStorage once the chunk
883  // is constructed.
884  NDArrayStorageType storage_type = kDefaultStorage;
885  /*! \brief type of aux */
886  std::vector<int> aux_types;
887  // context of data
888  Context ctx;
889  // The shape of the chunk data.
890  // This might not be the same shape as the NDArray, since the storage may be sparse.
891  // The default value for storage_shape is {0} when an empty non-default NDArray is created.
892  mxnet::TShape storage_shape;
893  // The shape of aux data. The default value for the shape depends on the type of storage.
894  // If aux_shapes[i].Size() is zero, aux data i is empty.
895  mxnet::ShapeVector aux_shapes;
896  /*! \brief Reference to the storage to ensure proper destruct order */
897  std::shared_ptr<Storage> storage_ref_;
898  /*! \brief Reference to the engine to ensure we cleanup without calling a destructed engine */
899  std::weak_ptr<Engine> engine_ref_;
900 
902  Chunk()
903  : static_data(true),
904  delay_alloc(false),
905  storage_ref_(Storage::_GetSharedRef()),
906  engine_ref_(Engine::_GetSharedRef()) {}
907 
909  Chunk(mxnet::TShape shape, Context ctx_, bool delay_alloc_, int dtype)
910  : static_data(false),
911  delay_alloc(true),
912  ctx(ctx_),
913  storage_ref_(Storage::_GetSharedRef()),
914  engine_ref_(Engine::_GetSharedRef()) {
915  storage_shape = shape;
916  if (shape_is_known(storage_shape)) {
917  shandle.size = shape.Size() * mshadow::mshadow_sizeof(dtype);
918  }
919  var = Engine::Get()->NewVariable();
920  shandle.ctx = ctx_;
921  if (!delay_alloc_) {
922  this->CheckAndAlloc();
923  }
924  }
925 
926  Chunk(const TBlob& data, int dev_id)
927  : static_data(true),
928  delay_alloc(false),
929  storage_ref_(Storage::_GetSharedRef()),
930  engine_ref_(Engine::_GetSharedRef()) {
931  CHECK(storage_type == kDefaultStorage);
932  var = Engine::Get()->NewVariable();
933  if (data.dev_mask() == cpu::kDevMask) {
934  ctx = Context::CPU();
935  } else {
936  CHECK_EQ(data.dev_mask(), gpu::kDevMask);
937  ctx = Context::GPU(dev_id);
938  }
939  // init shandle
940  shandle.ctx = ctx;
941  shandle.dptr = data.dptr_;
942  shandle.size = data.shape_.Size() * mshadow::mshadow_sizeof(data.type_flag_);
943  storage_shape = data.shape_;
944  }
945 
946  Chunk(int shared_pid, int shared_id, const mxnet::TShape& shape, int dtype)
947  : static_data(false),
948  delay_alloc(false),
949  storage_ref_(Storage::_GetSharedRef()),
950  engine_ref_(Engine::_GetSharedRef()) {
951  var = Engine::Get()->NewVariable();
952  ctx = Context::CPUShared(0);
953  shandle.size = shape.Size() * mshadow::mshadow_sizeof(dtype);
954  shandle.ctx = ctx;
955  shandle.shared_pid = shared_pid;
956  shandle.shared_id = shared_id;
957  Storage::Get()->Alloc(&shandle);
958  storage_shape = shape;
959  }
960  // Constructor for a non-default storage chunk
961  Chunk(NDArrayStorageType storage_type_,
962  const mxnet::TShape& storage_shape_,
963  Context ctx_,
964  bool delay_alloc_,
965  int dtype,
966  const std::vector<int>& aux_types_,
967  const mxnet::ShapeVector& aux_shapes_)
968  : static_data(false),
969  delay_alloc(delay_alloc_),
970  storage_type(storage_type_),
971  aux_types(aux_types_),
972  ctx(ctx_),
973  storage_shape(storage_shape_),
974  aux_shapes(aux_shapes_),
975  storage_ref_(Storage::_GetSharedRef()),
976  engine_ref_(Engine::_GetSharedRef()) {
977  shandle.ctx = ctx;
978  var = Engine::Get()->NewVariable();
979  // aux_handles always reflect the correct number of aux data
980  for (size_t i = 0; i < aux_shapes.size(); i++) {
981  CheckAndAllocAuxData(i, aux_shapes[i]);
982  // this line is needed in case when aux_shapes[i].Size() = 0
983  // aux_handles[i] will not be updated and take only default value.
984  aux_handles[i].ctx = ctx;
985  }
986  if (!delay_alloc) {
987  CheckAndAllocData(storage_shape, dtype);
988  }
989  }
990 
991  Chunk(const NDArrayStorageType storage_type_,
992  const TBlob& data,
993  const std::vector<TBlob>& aux_data,
994  int dev_id)
995  : static_data(true),
996  delay_alloc(false),
997  storage_type(storage_type_),
998  storage_ref_(Storage::_GetSharedRef()),
999  engine_ref_(Engine::_GetSharedRef()) {
1000  using namespace mshadow;
1001  CHECK_NE(storage_type, kDefaultStorage);
1002  // init var
1003  var = Engine::Get()->NewVariable();
1004  // init ctx
1005  if (data.dev_mask() == cpu::kDevMask) {
1006  ctx = Context::CPU();
1007  } else {
1008  CHECK_EQ(data.dev_mask(), gpu::kDevMask);
1009  ctx = Context::GPU(dev_id);
1010  }
1011  // init shandle
1012  shandle.ctx = ctx;
1013  shandle.dptr = data.dptr_;
1014  shandle.size = data.shape_.Size() * mshadow_sizeof(data.type_flag_);
1015  storage_shape = data.shape_;
1016  // init aux handles
1017  for (const auto& aux : aux_data) {
1018  Storage::Handle aux_handle;
1019  aux_handle.ctx = ctx;
1020  aux_handle.dptr = aux.dptr_;
1021  aux_handle.size = aux.shape_.Size() * mshadow_sizeof(aux.type_flag_);
1022  aux_handles.push_back(aux_handle);
1023  aux_types.emplace_back(aux.type_flag_);
1024  aux_shapes.emplace_back(aux.shape_);
1025  }
1026  }
1027 
1029  inline void set_aux_shape(const size_t i, const mxnet::TShape& shape) {
1030  aux_shapes[i] = shape;
1031  if (storage_shape.ndim() >= 0) {
1032  if (storage_type == kRowSparseStorage && i == rowsparse::kIdx) {
1033  storage_shape[0] = shape[0];
1034  } else if (storage_type == kCSRStorage && i == csr::kIdx) {
1035  storage_shape[0] = shape[0];
1036  }
1037  }
1038  }
1039 
1041  inline void CheckAndAlloc(void) {
1042  if (delay_alloc) {
1043  Storage::Get()->Alloc(&shandle);
1044 #if MXNET_USE_ONEDNN == 1
1045  dnnl_mem_ = nullptr;
1046 #endif
1047  delay_alloc = false;
1048  }
1049  }
1050 
1052  // size is the number of bytes
1053  void CheckAndAlloc(uint64_t dbytes) {
1054  CHECK_EQ(kDefaultStorage, storage_type)
1055  << "CheckAndAlloc(dbytes) is only intended for kDefaultStorage";
1056  dbytes = std::max(dbytes, static_cast<uint64_t>(shandle.size));
1057  if (delay_alloc) {
1058  shandle.size = dbytes;
1059  Storage::Get()->Alloc(&shandle);
1060 #if MXNET_USE_ONEDNN == 1
1061  dnnl_mem_ = nullptr;
1062 #endif
1063  delay_alloc = false;
1064  } else if (shandle.size < dbytes) {
1065  // free storage
1066  Storage::Get()->Free(shandle);
1067  // init storage
1068  shandle.size = dbytes;
1069  Storage::Get()->Alloc(&shandle);
1070 #if MXNET_USE_ONEDNN == 1
1071  dnnl_mem_ = nullptr;
1072 #endif
1073  }
1074  }
1076  void Init(const mxnet::TShape& shape, int dtype) {
1077  auto size = shape.Size();
1078  storage_shape = shape;
1079  shandle.size = size * mshadow::mshadow_sizeof(dtype);
1080  this->CheckAndAlloc();
1081  }
1082  inline void CheckAndAlloc(const mxnet::TShape& shape,
1083  const mxnet::ShapeVector& aux_shapes,
1084  int dtype) {
1085  // calculate size, perform allocation
1086  if (kRowSparseStorage == storage_type) {
1087  // For row sparse, aux_shape indicates the number of rows to allocate
1088  auto aux_shape = aux_shapes[rowsparse::kIdx];
1089  CheckAndAllocAuxData(rowsparse::kIdx, aux_shape);
1090  mxnet::TShape storage_shape(shape);
1091  storage_shape[0] = aux_shape[0];
1092  CheckAndAllocData(storage_shape, dtype);
1093  } else if (kCSRStorage == storage_type) {
1094  CheckAndAllocAuxData(csr::kIndPtr, aux_shapes[csr::kIndPtr]);
1095  CheckAndAllocAuxData(csr::kIdx, aux_shapes[csr::kIdx]);
1096  CheckAndAllocData(aux_shapes[csr::kIdx], dtype);
1097  } else {
1098  LOG(FATAL) << "Storage type " << storage_type << " not implemented for CheckAndAlloc";
1099  }
1100  }
1101  // create storage handle for data based on shape and dtype, assuming ctx is set
1102  // storage shape is also updated
1103  // if data is already allocated, try reuse the storage. Otherwise, free the current one
1104  // and allocate new storage
1105  void CheckAndAllocData(const mxnet::TShape& shape, int dtype);
1106 
1107 #if MXNET_USE_ONEDNN == 1
1108  // Have DNNL memory reference to the data in the default storage
1109  // or create memory for DNNL.
1110  void SetDNNLMem(const mxnet::TShape& shape, int dtype);
1111  // If the data is stored in DNNL layout, we reorder data in dnnl_mem_ and
1112  // save the result in shandle.
1113  void Reorder2Default();
1114  // Reorder data to a specified layout.
1115  void DNNLDataReorder(const void* md);
1116  bool IsDNNL() const;
1117  bool IsDefault() const;
1118 #endif
1119 
1120  // create storage handle for aux data based on shape
1121  // this function assumes ctx, aux shapes and aux types are set
1122  // aux shape is also updated
1123  // if aux data is already allocated, try reuse the storage. Otherwise, free the current one
1124  // and allocate new storage
1125  inline void CheckAndAllocAuxData(size_t i, const mxnet::TShape& shape) {
1126  CHECK_EQ(shape.ndim(), 1) << "shape must be 1D in CheckAndAllocAuxData";
1127  CHECK_NE(storage_type, kUndefinedStorage)
1128  << "storage type cannot be kUndefinedStorage in CheckAndAllocAuxData";
1129  CHECK_NE(storage_type, kDefaultStorage)
1130  << "storage type cannot be kDefaultStorage in CheckAndAllocAuxData";
1131  if (aux_handles.size() <= i) {
1132  aux_handles.resize(i + 1);
1133  }
1134  size_t aux_bytes = shape.Size() * mshadow::mshadow_sizeof(aux_types[i]);
1135  if (aux_handles[i].size < aux_bytes) {
1136  // free storage
1137  Storage::Get()->Free(aux_handles[i]);
1138  // init aux storage
1139  aux_handles[i] = Storage::Get()->Alloc(aux_bytes, ctx);
1140  }
1141  // init shape
1142  set_aux_shape(i, shape);
1143  }
1145  ~Chunk();
1146  }; // struct Chunk
1147 
1151  inline void Init(const NDArrayStorageType stype, const mxnet::TShape& shape, int dtype) {
1152  shape_ = shape;
1153  dtype_ = dtype;
1154  storage_type_ = stype;
1155  reuse_ = false;
1156  byte_offset_ = 0;
1157  autograd_entry_ = nnvm::NodeEntry(nullptr);
1158  }
1159 
1160  void SetTBlob() const;
1161 
1163  std::shared_ptr<Chunk> ptr_{nullptr};
1168  mutable mxnet::TShape shape_;
1170  size_t byte_offset_ = 0;
1172  int dtype_ = -1;
1174  bool reuse_ = false;
1176  NDArrayStorageType storage_type_ = kUndefinedStorage;
1178  nnvm::NodeEntry autograd_entry_;
1180  nnvm::NodeEntry deferredcompute_entry_;
1188  mutable TBlob tblob_;
1189 }; // class NDArray
1190 
1194 size_t num_aux_data(NDArrayStorageType stype);
1195 
1207 void CopyFromTo(const NDArray& from, const NDArray* to, int priority = 0);
1208 
1222 void CopyFromTo(const NDArray& from, const NDArray& to, int priority = 0, bool is_opr = false);
1223 
1230 void ElementwiseSum(const std::vector<NDArray>& source, NDArray* out, int priority = 0);
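 // Usage sketch (illustrative only): CopyFromTo works across devices and, like
 // most NDArray operations, is asynchronous; synchronize before reading the
 // result. dense is the example CPU array from the earlier sketch.
 //
 //   mxnet::NDArray gpu_arr(mxnet::TShape({2, 3}), mxnet::Context::GPU(0));
 //   mxnet::CopyFromTo(dense, &gpu_arr);  // schedules a CPU -> GPU copy
 //   gpu_arr.WaitToRead();                // block until the copy finishes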
1231 
1238 NDArray operator+(const NDArray& lhs, const NDArray& rhs);
1245 NDArray operator+(const NDArray& lhs, const real_t& rhs);
1252 NDArray operator-(const NDArray& lhs, const NDArray& rhs);
1259 NDArray operator-(const NDArray& lhs, const real_t& rhs);
1266 NDArray operator*(const NDArray& lhs, const NDArray& rhs);
1273 NDArray operator*(const NDArray& lhs, const real_t& rhs);
1280 NDArray operator/(const NDArray& lhs, const NDArray& rhs);
1287 NDArray operator/(const NDArray& lhs, const real_t& rhs);
1288 
1293 void RandomSeed(uint32_t seed);
1298 void RandomSeed(Context ctx, uint32_t seed);
1305 void SampleUniform(real_t begin, real_t end, NDArray* out);
1312 void SampleGaussian(real_t mu, real_t sigma, NDArray* out);
1319 void SampleGamma(real_t alpha, real_t beta, NDArray* out);
1325 void SampleExponential(real_t lambda, NDArray* out);
1331 void SamplePoisson(real_t lambda, NDArray* out);
1338 void SampleNegBinomial(int32_t k, real_t p, NDArray* out);
1345 void SampleGenNegBinomial(real_t mu, real_t alpha, NDArray* out);
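 // Usage sketch (illustrative only): seeding the global RNG and filling an
 // array in place with one of the samplers above.
 //
 //   mxnet::RandomSeed(42);
 //   mxnet::NDArray noise(mxnet::TShape({100}), mxnet::Context::CPU());
 //   mxnet::SampleGaussian(0.0f, 1.0f, &noise);  // mu = 0, sigma = 1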
1346 
1347 //--------------------------------------------------------------
1348 // The following part is the API registration of NDArray functions.
1349 //--------------------------------------------------------------
1350 
1352 typedef std::function<void(NDArray** used_vars,
1353  real_t* scalars,
1354  NDArray** mutate_vars,
1355  int num_params,
1356  char** param_keys,
1357  char** param_vals)>
1358  NDArrayAPIFunction;
1359 /*! \brief mask information on how functions can be exposed */
1360 enum NDArrayFunctionTypeMask {
1361  /*! \brief all the use_vars should go before scalar */
1362  kNDArrayArgBeforeScalar = 1,
1363  /*! \brief all the scalar should go before use_vars */
1364  kScalarArgBeforeNDArray = 2,
1365  /*!
1366  * \brief whether this function allows the handles in the target to
1367  * be empty NDArray that are not yet initialized, and will initialize
1368  * them when the function is invoked.
1369  *
1370  * most functions should support this, except copy between different
1371  * devices, which requires the NDArray to be pre-initialized with context
1372  */
1373  kAcceptEmptyMutateTarget = 1 << 2
1374 };
1375 /*! \brief Registry entry for NDArrayFunction */
1376 struct NDArrayFunctionReg
1377  : public dmlc::FunctionRegEntryBase<NDArrayFunctionReg, NDArrayAPIFunction> {
1378  /*! \brief number of variables used by this function */
1379  unsigned num_use_vars;
1380  /*! \brief number of variables mutated by this function */
1381  unsigned num_mutate_vars;
1382  /*! \brief number of scalars used by this function */
1383  unsigned num_scalars;
1384  /*! \brief information on how the function should be called from the API */
1385  int type_mask;
1389  NDArrayFunctionReg() : num_use_vars(0), num_mutate_vars(0), num_scalars(0), type_mask(0) {}
1396  inline NDArrayFunctionReg& set_function(void (*fsetvalue)(const real_t& rhs, NDArray* out)) {
1397  body = [fsetvalue](NDArray** used_vars,
1398  real_t* s,
1399  NDArray** mutate_vars,
1400  int num_params,
1401  char** param_keys,
1402  char** param_vals) { (*fsetvalue)(s[0], mutate_vars[0]); };
1403  num_mutate_vars = 1;
1404  num_scalars = 1;
1405  this->add_argument("src", "real_t", "Source input to the function.");
1406  return *this;
1407  }
1414  inline NDArrayFunctionReg& set_function(
1415  void (*fternary)(const NDArray& lhs, const NDArray& mhs, const NDArray& rhs, NDArray* out)) {
1416  body = [fternary](NDArray** used_vars,
1417  real_t* s,
1418  NDArray** mutate_vars,
1419  int num_params,
1420  char** param_keys,
1421  char** param_vals) {
1422  (*fternary)(*used_vars[0], *used_vars[1], *used_vars[2], mutate_vars[0]);
1423  };
1424  num_use_vars = 3;
1425  num_mutate_vars = 1;
1426  type_mask = kAcceptEmptyMutateTarget;
1427  this->add_argument("lhs", "NDArray", "Left operand to the function.");
1428  this->add_argument("mhs", "NDArray", "Middle operand to the function.");
1429  this->add_argument("rhs", "NDArray", "Right operand to the function.");
1430  return *this;
1431  }
1438  inline NDArrayFunctionReg& set_function(void (*fbinary)(const NDArray& lhs,
1439  const NDArray& rhs,
1440  NDArray* out)) {
1441  body = [fbinary](NDArray** used_vars,
1442  real_t* s,
1443  NDArray** mutate_vars,
1444  int num_params,
1445  char** param_keys,
1446  char** param_vals) {
1447  (*fbinary)(*used_vars[0], *used_vars[1], mutate_vars[0]);
1448  };
1449  num_use_vars = 2;
1450  num_mutate_vars = 1;
1451  type_mask = kAcceptEmptyMutateTarget;
1452  this->add_argument("lhs", "NDArray", "Left operand to the function.");
1453  this->add_argument("rhs", "NDArray", "Right operand to the function.");
1454  return *this;
1455  }
1462  inline NDArrayFunctionReg& set_function(void (*fscalar)(const NDArray& lhs,
1463  const real_t& rhs,
1464  NDArray* out)) {
1465  body = [fscalar](NDArray** used_vars,
1466  real_t* s,
1467  NDArray** mutate_vars,
1468  int num_params,
1469  char** param_keys,
1470  char** param_vals) { (*fscalar)(*used_vars[0], s[0], mutate_vars[0]); };
1471  num_use_vars = 1;
1472  num_mutate_vars = 1;
1473  num_scalars = 1;
1474  type_mask = kNDArrayArgBeforeScalar | kAcceptEmptyMutateTarget;
1475  this->add_argument("lhs", "NDArray", "Left operand to the function.");
1476  this->add_argument("rhs", "real_t", "Right operand to the function.");
1477  return *this;
1478  }
1485  inline NDArrayFunctionReg& set_function(void (*funary)(const NDArray& src, NDArray* out)) {
1486  body = [funary](NDArray** used_vars,
1487  real_t* s,
1488  NDArray** mutate_vars,
1489  int num_params,
1490  char** param_keys,
1491  char** param_vals) { (*funary)(*used_vars[0], mutate_vars[0]); };
1492  num_use_vars = 1;
1493  num_mutate_vars = 1;
1494  type_mask = kAcceptEmptyMutateTarget;
1495  this->add_argument("src", "NDArray", "Source input to the function.");
1496  return *this;
1497  }
1504  inline NDArrayFunctionReg& set_function(
1505  void (*fgeneric)(NDArray** used_vars,
1506  real_t* s,
1507  NDArray** mutate_vars,
1508  const std::map<std::string, std::string>& param)) {
1509  body = [fgeneric](NDArray** used_vars,
1510  real_t* s,
1511  NDArray** mutate_vars,
1512  int num_params,
1513  char** param_keys,
1514  char** param_vals) {
1515  std::map<std::string, std::string> param;
1516  for (int i = 0; i < num_params; ++i) {
1517  param[param_keys[i]] = param_vals[i];
1518  }
1519  fgeneric(used_vars, s, mutate_vars, param);
1520  };
1521  return *this;
1522  }
1528  inline NDArrayFunctionReg& set_num_use_vars(unsigned n) {
1529  num_use_vars = n;
1530  return *this;
1531  }
1537  inline NDArrayFunctionReg& set_num_mutate_vars(unsigned n) {
1538  num_mutate_vars = n;
1539  return *this;
1540  }
1546  inline NDArrayFunctionReg& set_num_scalars(unsigned n) {
1547  num_scalars = n;
1548  return *this;
1549  }
1555  inline NDArrayFunctionReg& set_type_mask(int tmask) {
1556  type_mask = tmask;
1557  return *this;
1558  }
1559 }; // NDArrayFunctionReg
1560 
1572 #define MXNET_REGISTER_NDARRAY_FUN(name) \
1573  DMLC_REGISTRY_REGISTER(::mxnet::NDArrayFunctionReg, NDArrayFunctionReg, name)
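 // Usage sketch (illustrative only): registering a unary NDArray function.
 // MyUnaryFun and my_unary_fun are hypothetical names; set_function(funary)
 // fills in num_use_vars/num_mutate_vars automatically (see NDArrayFunctionReg
 // above), and describe() comes from dmlc::FunctionRegEntryBase.
 //
 //   void MyUnaryFun(const mxnet::NDArray& src, mxnet::NDArray* out);  // hypothetical
 //   MXNET_REGISTER_NDARRAY_FUN(my_unary_fun)
 //       .set_function(MyUnaryFun)
 //       .describe("Example registration of a unary NDArray function.");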
1574 
1575 } // namespace mxnet
1576 
1577 namespace dmlc {
1579 DMLC_DECLARE_TRAITS(has_saveload, mxnet::NDArray, true);
1580 } // namespace dmlc
1581 #endif // MXNET_NDARRAY_H_