mxnet
utils.h
Go to the documentation of this file.
1 /*
2  * Licensed to the Apache Software Foundation (ASF) under one
3  * or more contributor license agreements. See the NOTICE file
4  * distributed with this work for additional information
5  * regarding copyright ownership. The ASF licenses this file
6  * to you under the Apache License, Version 2.0 (the
7  * "License"); you may not use this file except in compliance
8  * with the License. You may obtain a copy of the License at
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing,
13  * software distributed under the License is distributed on an
14  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15  * KIND, either express or implied. See the License for the
16  * specific language governing permissions and limitations
17  * under the License.
18  */
19 
25 #ifndef MXNET_COMMON_UTILS_H_
26 #define MXNET_COMMON_UTILS_H_
27 
28 #include <dmlc/logging.h>
29 #include <dmlc/omp.h>
30 #include <nnvm/graph.h>
31 #include <mxnet/engine.h>
32 #include <mxnet/ndarray.h>
33 #include <mxnet/op_attr_types.h>
34 #include <mxnet/graph_attr_types.h>
35 #include <nnvm/graph_attr_types.h>
36 
37 #include <algorithm>
38 #include <functional>
39 #include <limits>
40 #include <memory>
41 #include <random>
42 #include <sstream>
43 #include <string>
44 #include <thread>
45 #include <type_traits>
46 #include <unordered_set>
47 #include <utility>
48 #include <vector>
47 
48 #include "../operator/mxnet_op.h"
49 
50 namespace mxnet {
51 namespace common {
52 
58  template<typename DType, typename IType>
59  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
60  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
61  if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
62  (i == 0 && indptr[i] != 0) ||
63  (i == end - 1 && indptr[end] != idx_size))
64  *out = kCSRIndPtrErr;
65  }
66 };
67 
72 struct csr_idx_check {
73  template<typename DType, typename IType, typename RType>
74  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
75  const RType* indptr, const nnvm::dim_t ncols) {
76  for (RType j = indptr[i]; j < indptr[i+1]; j++) {
77  if (idx[j] >= ncols || idx[j] < 0 ||
78  (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
79  *out = kCSRIdxErr;
80  break;
81  }
82  }
83  }
84 };
85 
90 struct rsp_idx_check {
91  template<typename DType, typename IType>
92  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
93  const nnvm::dim_t end, const nnvm::dim_t nrows) {
94  if ((i < end && idx[i+1] <= idx[i])
95  || idx[i] < 0 || idx[i] >= nrows)
96  *out = kRSPIdxErr;
97  }
98 };
99 
/*!
 * \brief Dispatch a storage-format check of \p input to the device-specific
 *        implementation; a nonzero error code is written into \p err_cpu.
 *        Declaration only — defined in the per-device source files.
 */
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check);
103 
/*!
 * \brief Check the validity of a CSRNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of CSR storage.
 * \param err_cpu Error flag blob on cpu; a kCSR*Err code is written on failure.
 * \param full_check If true, also validate indptr/idx contents with device
 *        kernels (more expensive than the shape-only checks).
 */
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const TShape shape = input.shape();
  const TShape idx_shape = input.aux_shape(csr::kIdx);
  const TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const TShape storage_shape = input.storage_shape();
  // Cheap structural checks: matrix is 2-D, aux/data arrays are 1-D,
  // indptr holds one entry per row plus one, idx length matches data length.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          // One-element scratch flag on the execution device, preset to kNormalErr.
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          // One thread per row validates the indptr array.
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
            input.aux_data(csr::kIndPtr).dptr<RType>(),
            indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
              input.aux_data(csr::kIdx).dptr<IType>(),
              input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          // Copy the device-side flag back into the caller-provided cpu blob.
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                        val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}
157 
/*!
 * \brief Check the validity of a RowSparseNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of row_sparse storage.
 * \param err_cpu Error flag blob on cpu; a kRSP*Err code is written on failure.
 * \param full_check If true, also validate the row-index array with a device kernel.
 */
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage)
    << "CheckFormatRSPImpl is for RSPNDArray";
  const TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // The number of stored rows must equal the number of row indices.
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // An empty row_sparse array is trivially valid; nothing more to check.
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
        // One-element scratch flag on the execution device, preset to kNormalErr.
        NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                  rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());

        // One thread per stored row; `end` = idx_shape[0] - 1 so the kernel
        // only compares idx[i+1] while a successor exists.
        Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
          val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
          idx_shape[0] - 1, input.shape()[0]);
        // Copy the device-side flag back into the caller-provided cpu blob.
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                      val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}
201 
202 template<typename xpu>
203 void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
204  const TBlob &err_cpu, const bool full_check) {
205  int stype = input.storage_type();
206  if (stype == kCSRStorage) {
207  CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
208  } else if (stype == kRowSparseStorage) {
209  CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
210  } else if (stype == kDefaultStorage) {
211  // no-op for default storage
212  } else {
213  LOG(FATAL) << "Unknown storage type " << stype;
214  }
215 }
216 
/*!
 * \brief Pick the rows specified by \p idx_data from the row-sparse
 *        \p input_nd and store them in \p output_nd.
 *        Declaration only — defined in the per-device source files.
 */
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
                                     const NDArray& input_nd,
                                     const TBlob& idx_data,
                                     const OpReqType req,
                                     NDArray* output_nd);
226 
/*!
 * \brief Cast the storage type of \p input to that of \p output.
 *        Declaration only — defined in the per-device source files.
 */
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
231 
235 inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
236  const NDArrayStorageType stype) {
237  if (!vstorage.empty()) {
238  for (const auto& i : vstorage) {
239  if (i != stype) return false;
240  }
241  return true;
242  }
243  return false;
244 }
245 
250 inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
251  const NDArrayStorageType stype1,
252  const NDArrayStorageType stype2,
253  bool *has_both) {
254  if (has_both) {
255  *has_both = false;
256  }
257  if (!vstorage.empty()) {
258  uint8_t has = 0;
259  for (const auto i : vstorage) {
260  if (i == stype1) {
261  has |= 1;
262  } else if (i == stype2) {
263  has |= 2;
264  } else {
265  return false;
266  }
267  }
268  if (has_both) {
269  *has_both = has == 3;
270  }
271  return true;
272  }
273  return false;
274 }
275 
279 inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
280  const NDArrayStorageType stype) {
281  if (!ndarrays.empty()) {
282  for (const auto& nd : ndarrays) {
283  if (nd.storage_type() != stype) {
284  return false;
285  }
286  }
287  return true;
288  }
289  return false;
290 }
291 
295 inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
296  const NDArrayStorageType stype1,
297  const NDArrayStorageType stype2,
298  bool *has_both) {
299  if (has_both) {
300  *has_both = false;
301  }
302  if (!ndarrays.empty()) {
303  uint8_t has = 0;
304  for (const auto& nd : ndarrays) {
305  const NDArrayStorageType stype = nd.storage_type();
306  if (stype == stype1) {
307  has |= 1;
308  } else if (stype == stype2) {
309  has |= 2;
310  } else {
311  return false;
312  }
313  }
314  if (has_both) {
315  *has_both = has == 3;
316  }
317  return true;
318  }
319  return false;
320 }
321 
325 inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
326  const NDArrayStorageType stype) {
327  if (!ndarrays.empty()) {
328  for (const auto& nd : ndarrays) {
329  if (nd.storage_type() == stype) {
330  return true;
331  }
332  }
333  }
334  return false;
335 }
336 
340 inline bool ContainsStorageType(const std::vector<int>& ndstypes,
341  const NDArrayStorageType stype) {
342  if (!ndstypes.empty()) {
343  for (const auto& ndstype : ndstypes) {
344  if (ndstype == stype) {
345  return true;
346  }
347  }
348  }
349  return false;
350 }
351 
353 inline std::string dispatch_mode_string(const DispatchMode x) {
354  switch (x) {
356  return "fcompute";
358  return "fcompute_ex";
360  return "fcompute_fallback";
362  return "variable";
364  return "undefined";
365  }
366  return "unknown";
367 }
368 
369 
371 inline std::string stype_string(const int x) {
372  switch (x) {
373  case kDefaultStorage:
374  return "default";
375  case kCSRStorage:
376  return "csr";
377  case kRowSparseStorage:
378  return "row_sparse";
379  }
380  return "unknown";
381 }
382 
384 inline std::string dev_type_string(const int dev_type) {
385  switch (dev_type) {
386  case Context::kCPU:
387  return "cpu";
388  case Context::kGPU:
389  return "gpu";
390  case Context::kCPUPinned:
391  return "cpu_pinned";
392  case Context::kCPUShared:
393  return "cpu_shared";
394  }
395  return "unknown";
396 }
397 
399 inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
400  const int dev_mask,
401  const std::vector<int>& in_attrs,
402  const std::vector<int>& out_attrs) {
403  std::ostringstream os;
404  os << "operator = " << attrs.op->name
405  << "\ninput storage types = [";
406  for (const int attr : in_attrs) {
407  os << stype_string(attr) << ", ";
408  }
409  os << "]\n"
410  << "output storage types = [";
411  for (const int attr : out_attrs) {
412  os << stype_string(attr) << ", ";
413  }
414  os << "]\n"
415  << "params = {";
416  for (auto kv : attrs.dict) {
417  os << "\"" << kv.first << "\" : " << kv.second << ", ";
418  }
419  os << "}\n"
420  << "context.dev_mask = " << dev_type_string(dev_mask);
421  return os.str();
422 }
423 
425 inline std::string operator_string(const nnvm::NodeAttrs& attrs,
426  const OpContext& ctx,
427  const std::vector<NDArray>& inputs,
428  const std::vector<OpReqType>& req,
429  const std::vector<NDArray>& outputs) {
430  std::string result = "";
431  std::vector<int> in_stypes;
432  std::vector<int> out_stypes;
433  in_stypes.reserve(inputs.size());
434  out_stypes.reserve(outputs.size());
435  auto xform = [](const NDArray arr) -> int { return arr.storage_type(); };
436  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
437  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
438  result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
439  return result;
440 }
441 
443 inline void LogOnce(const std::string& message) {
444  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
445  auto log_store = LogStore::Get();
446  if (log_store->find(message) == log_store->end()) {
447  LOG(INFO) << message;
448  log_store->insert(message);
449  }
450 }
451 
/*! \brief log a storage fallback event (once per unique operator description);
 *         silenced entirely by MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0
 * \param attrs attributes of the operator node
 * \param dev_mask device mask the operator is dispatched on
 * \param in_attrs input storage types (must be non-null)
 * \param out_attrs output storage types (must be non-null)
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  // Env var is read once per process; later changes have no effect.
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log) return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning = "\nThe operator with default storage type will be dispatched "
    "for execution. You're seeing this warning message because the operator above is unable "
    "to process the given ndarrays with specified storage types, context and parameter. "
    "Temporary dense ndarrays are generated in order to execute the operator. "
    "This does not affect the correctness of the programme. "
    "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
    "0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  // LogOnce deduplicates: repeated fallbacks of the same op signature log once.
  LogOnce(os.str());
}
472 
// heuristic to determine number of threads per GPU
inline int GetNumThreadsPerGPU() {
  // This is resource efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
478 
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is resource efficient option.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  // Never exceed the GPU worker thread count — extra colors could not be used.
  return std::min(num_match_color, GetNumThreadsPerGPU());
}
486 
/*!
 * \brief Sum the n elements of array a onto start, parallelized with OpenMP.
 * \param a pointer to the values to accumulate
 * \param n number of elements to read from a
 * \param start initial value of the accumulator
 * \return start + a[0] + ... + a[n-1]
 */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V acc = start;
#pragma omp parallel for reduction(+:acc)
  for (int idx = 0; idx < n; ++idx) {
    acc += a[idx];
  }
  return acc;
}
496 
/*!
 * \brief Helper function for ParallelSort — do NOT call directly; use the
 *        ParallelSort interface. Recursively halves [first, first+len),
 *        sorting the first half on a new std::thread and the second half on
 *        the current thread, then merges the sorted halves in place.
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    // Range small enough: sort sequentially.
    std::sort(first, first + len, comp);
    return;
  }
  const size_t half = len / 2;
  std::thread worker(ParallelSortHelper<RandomIt, Compare>, first, half, grainsize, comp);
  ParallelSortHelper(first + half, len - half, grainsize, comp);
  worker.join();
  std::inplace_merge(first, first + half, first + len, comp);
}
516 
526 template<typename RandomIt, typename Compare>
527 void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
528  const auto num = std::distance(first, last);
529  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
530  ParallelSortHelper(first, num, grainsize, comp);
531 }
532 
542 template<typename RandomIt>
543 void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
544  ParallelSort(first, last, num_threads,
545  std::less<typename std::iterator_traits<RandomIt>::value_type>());
546 }
547 
/*! \brief Random Engine used across mxnet (32-bit Mersenne Twister). */
typedef std::mt19937 RANDOM_ENGINE;
552 
/*! \brief Helper traits for the MakeUnique overload set. */
namespace helper {

/*! \brief Helper for non-array type T. */
template <class T>
struct UniqueIf {
  /*! \brief Type of T. */
  using SingleObject = std::unique_ptr<T>;
};

/*! \brief Helper for an array of unknown bound T[]. */
template <class T>
struct UniqueIf<T[]> {
  /*! \brief Type of T. */
  using UnknownBound = std::unique_ptr<T[]>;
};

/*! \brief Helper for an array of known bound T[kSize]; maps to void so the
 *         corresponding MakeUnique overload can be deleted. */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*! \brief Type of T. */
  using KnownBound = void;
};

}  // namespace helper
592 
604 template <class T, class... Args>
606  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
607 }
608 
618 template <class T>
620  using U = typename std::remove_extent<T>::type;
621  return std::unique_ptr<T>(new U[n]{});
622 }
623 
/*! \brief Construction of arrays of known bound is deliberately disallowed;
 *         use MakeUnique<T[]>(n) instead. */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
634 
/*!
 * \brief Look up the registered compute function `name` for `op` on the
 *        device of `ctx` (i.e. the "name<cpu>" or "name<gpu>" attribute).
 * \return the registered function, or nullptr if none is registered
 *
 * NOTE(review): the registries are function-local statics, so they bind to
 * the `name` passed on the FIRST call per FCompType instantiation —
 * presumably each FCompType is only ever paired with one name; verify callers.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");

  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask";
    return nullptr;
  }
}
650 
/*!
 * \brief Return the largest integer magnitude representable in T without loss
 *        of precision: numeric_limits::max() for integral T, otherwise
 *        2^digits (one past the contiguous exact-integer range of a float).
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  return std::is_integral<T>::value
             ? std::numeric_limits<T>::max()
             : size_t(2) << (std::numeric_limits<T>::digits - 1);
}
660 
/*! \brief Specialization for fp16: 11 significand bits (incl. the implicit
 *         bit), so 2^11 = 2048 is the bound used here. */
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}
665 
666 MSHADOW_XINLINE int ilog2ul(size_t a) {
667  int k = 1;
668  while (a >>= 1) ++k;
669  return k;
670 }
671 
672 MSHADOW_XINLINE int ilog2ui(unsigned int a) {
673  int k = 1;
674  while (a >>= 1) ++k;
675  return k;
676 }
677 
681 inline NDArray InitZeros(const NDArrayStorageType stype, const TShape &shape,
682  const Context &ctx, const int dtype) {
683  // NDArray with default storage
684  if (stype == kDefaultStorage) {
685  NDArray ret(shape, ctx, false, dtype);
686  ret = 0;
687  return ret;
688  }
689  // NDArray with non-default storage. Storage allocation is always delayed.
690  return NDArray(stype, shape, ctx, true, dtype);
691 }
692 
696 inline void EmplaceBackZeros(const NDArrayStorageType stype, const TShape &shape,
697  const Context &ctx, const int dtype,
698  std::vector<NDArray> *vec) {
699  // NDArray with default storage
700  if (stype == kDefaultStorage) {
701  vec->emplace_back(shape, ctx, false, dtype);
702  vec->back() = 0;
703  } else {
704  // NDArray with non-default storage. Storage allocation is always delayed.
705  vec->emplace_back(stype, shape, ctx, true, dtype);
706  }
707 }
708 
709 } // namespace common
710 } // namespace mxnet
711 #endif // MXNET_COMMON_UTILS_H_
Definition: ndarray.h:74
static MSHADOW_XINLINE void Map(int i, DType *out, const IType *idx, const RType *indptr, const nnvm::dim_t ncols)
Definition: utils.h:74
Definition: ndarray.h:63
NDArrayStorageType
Definition: ndarray.h:61
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check)
Check the validity of CSRNDArray.
Definition: utils.h:113
DeviceType dev_mask() const
Get corresponding device mask.
Definition: base.h:151
Definition: ndarray.h:54
NDArrayStorageType storage_type() const
Definition: ndarray.h:269
Engine that schedules all the operations according to dependency.
void CheckFormatImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check)
Definition: utils.h:203
int GetNumThreadsPerGPU()
Definition: utils.h:474
void SparseRetainOpForwardRspWrapper(mshadow::Stream< xpu > *s, const NDArray &input_nd, const TBlob &idx_data, const OpReqType req, NDArray *output_nd)
Pick rows specified by user input index array from a row sparse ndarray and save them in the output s...
const TShape & storage_shape() const
Definition: ndarray.h:177
std::string operator_stype_string(const nnvm::NodeAttrs &attrs, const int dev_mask, const std::vector< int > &in_attrs, const std::vector< int > &out_attrs)
get string representation of the operator stypes
Definition: utils.h:399
namespace of mxnet
Definition: base.h:118
Additional operator attributes beside the ones provided by NNVM.
void KnownBound
Type of T.
Definition: utils.h:588
void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare &comp)
Helper function for ParallelSort. DO NOT call this function directly. Use the interface ParallelSort ...
Definition: utils.h:505
int type_flag_
type flag of the tensor blob
Definition: tensor_blob.h:74
FCompType GetFCompute(const nnvm::Op *op, const std::string &name, const Context &ctx)
Definition: utils.h:636
V ParallelAccumulate(const T *a, const int n, V start)
Definition: utils.h:488
void LogOnce(const std::string &message)
log message once. Intended for storage fallback warning messages.
Definition: utils.h:443
nnvm::TShape TShape
Shape data structure used to record shape information.
Definition: base.h:128
Context ctx
base Context
Definition: base.h:259
Definition: ndarray.h:72
execution time context. The information needed in runtime for actual execution.
Definition: base.h:257
DispatchMode
the dispatch mode of the operator
Definition: op_attr_types.h:112
NDArray InitZeros(const NDArrayStorageType stype, const TShape &shape, const Context &ctx, const int dtype)
Return an NDArray of all zeros.
Definition: utils.h:681
std::string stype_string(const int x)
get string representation of storage_type
Definition: utils.h:371
Definition: ndarray.h:65
void CastStorageDispatch(const OpContext &ctx, const NDArray &input, const NDArray &output)
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check)
Definition: base.h:136
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp)
Sort the elements in the range [first, last) into the ascending order defined by the comparator comp...
Definition: utils.h:527
All the possible information needed by Operator.Forward and Backward This is the superset of RunConte...
Definition: op_attr_types.h:66
bool ContainsOnlyStorage(const StorageTypeVector &vstorage, const NDArrayStorageType stype)
returns true if all storage types in vstorage are the same as target stype. false is returned for emp...
Definition: utils.h:235
std::string operator_string(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector< NDArray > &inputs, const std::vector< OpReqType > &req, const std::vector< NDArray > &outputs)
get string representation of the operator
Definition: utils.h:425
NDArray interface that handles array arithmetic.
std::mt19937 RANDOM_ENGINE
Random Engine.
Definition: utils.h:551
void EmplaceBackZeros(const NDArrayStorageType stype, const TShape &shape, const Context &ctx, const int dtype, std::vector< NDArray > *vec)
Helper to add a NDArray of zeros to a std::vector.
Definition: utils.h:696
Indices of RSPNDArray should be non-negative, less than the size of first dimension and in ascending ...
Definition: utils.h:90
Definition: ndarray.h:58
Definition: base.h:138
const TShape & shape() const
Definition: ndarray.h:169
std::string dispatch_mode_string(const DispatchMode x)
get string representation of dispatch_mode
Definition: utils.h:353
std::string dev_type_string(const int dev_type)
get string representation of device type
Definition: utils.h:384
Helper for non-array type T.
Definition: utils.h:562
Definition: base.h:137
Definition: ndarray.h:54
Data structures that can appear in graph attributes.
Definition: ndarray.h:64
IndPtr should be non-negative, in non-decreasing order, start with 0 and end with value equal with si...
Definition: utils.h:57
Definition: ndarray.h:71
std::unique_ptr< T[]> UnknownBound
Type of T.
Definition: utils.h:577
OpReqType
operation request type to Forward and Backward
Definition: op_attr_types.h:45
nnvm::Op Op
operator structure from NNVM
Definition: base.h:130
bool ContainsStorageType(const std::vector< NDArray > &ndarrays, const NDArrayStorageType stype)
returns true if storage type of any array in ndarrays is the same as the target stype. false is returned for empty inputs.
Definition: utils.h:325
constexpr size_t MaxIntegerValue()
Return the max integer value representable in the type T without loss of precision.
Definition: utils.h:655
RunContext run_ctx
RunContext related resources.
Definition: op_attr_types.h:72
int64_t dim_t
data type to store dim size
Definition: c_api.h:62
std::unique_ptr< T > SingleObject
Type of T.
Definition: utils.h:566
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check)
Check the validity of RowSparseNDArray.
Definition: utils.h:167
int GetExecNumMatchColor()
Definition: utils.h:481
Definition: base.h:139
static MSHADOW_XINLINE void Map(int i, DType *out, const IType *idx, const nnvm::dim_t end, const nnvm::dim_t nrows)
Definition: utils.h:92
MSHADOW_XINLINE int ilog2ul(size_t a)
Definition: utils.h:666
void LogStorageFallback(const nnvm::NodeAttrs &attrs, const int dev_mask, const std::vector< int > *in_attrs, const std::vector< int > *out_attrs)
log storage fallback event
Definition: utils.h:454
helper::UniqueIf< T >::SingleObject MakeUnique(Args &&...args)
Constructs an object of type T and wraps it in a std::unique_ptr.
Definition: utils.h:605
Context information about the execution environment.
Definition: base.h:133
Indices should be non-negative, less than the number of columns and in ascending order per row...
Definition: utils.h:72
const TShape & aux_shape(size_t index) const
get the shape of aux_data(index)
Definition: ndarray.h:189
ndarray interface
Definition: ndarray.h:82
MSHADOW_XINLINE int ilog2ui(unsigned int a)
Definition: utils.h:672
static MSHADOW_XINLINE void Map(int i, DType *out, const IType *indptr, const nnvm::dim_t end, const nnvm::dim_t idx_size)
Definition: utils.h:59
std::vector< int > StorageTypeVector
The result holder of storage type of each NodeEntry in the graph.
Definition: graph_attr_types.h:45
tensor blob class that can be used to hold tensor of any dimension, any device and any data type...
Definition: tensor_blob.h:66