mxnet
tensor_inspector.h
Go to the documentation of this file.
1 /*
2  * Licensed to the Apache Software Foundation (ASF) under one
3  * or more contributor license agreements. See the NOTICE file
4  * distributed with this work for additional information
5  * regarding copyright ownership. The ASF licenses this file
6  * to you under the Apache License, Version 2.0 (the
7  * "License"); you may not use this file except in compliance
8  * with the License. You may obtain a copy of the License at
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing,
13  * software distributed under the License is distributed on an
14  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15  * KIND, either express or implied. See the License for the
16  * specific language governing permissions and limitations
17  * under the License.
18  */
19 
26 #ifndef MXNET_COMMON_TENSOR_INSPECTOR_H_
27 #define MXNET_COMMON_TENSOR_INSPECTOR_H_
28 
29 #include <algorithm>
30 #include <cmath>
31 #include <string>
32 #include <vector>
33 #include <fstream>
34 #include "../../3rdparty/mshadow/mshadow/base.h"
35 
36 namespace mxnet {
37 
43  static InspectorManager* get() {
44  static std::mutex mtx;
45  static std::unique_ptr<InspectorManager> im = nullptr;
46  if (!im) {
47  std::unique_lock<std::mutex> lk(mtx);
48  if (!im)
49  im = std::make_unique<InspectorManager>();
50  }
51  return im.get();
52  }
53  /* !\brief mutex used to lock interactive_print() and check_value() */
54  std::mutex mutex_;
55  /* !\brief skip all interactive prints */
57  /* !\brief skip all value checks */
58  bool check_value_skip_all_ = false;
59  /* !\brief visit count for interactive print tags */
60  std::unordered_map<std::string, int> interactive_print_tag_counter_;
61  /* !\brief visit count for check value tags */
62  std::unordered_map<std::string, int> check_value_tag_counter_;
63  /* !\brief visit count for dump value tags */
64  std::unordered_map<std::string, int> dump_to_file_tag_counter_;
65 };
66 
71  NegativeChecker, // check if is negative
72  PositiveChecker, // check if is positive
73  ZeroChecker, // check if is zero
74  NaNChecker, // check if is NaN, will always return false if DType is not a float type
75  InfChecker, // check if is infinity, will always return false if DType is not a float type
76  PositiveInfChecker, // check if is positive infinity,
77  // will always return false if DType is not a float type
78  NegativeInfChecker, // check if is nagative infinity,
79  // will always return false if DType is not a float type
80  FiniteChecker, // check if is finite, will always return false if DType is not a float type
81  NormalChecker, // check if is neither infinity nor NaN
82  AbnormalChecker, // chekck if is infinity or nan
83 };
84 
103  private:
110  template <typename DType, typename StreamType>
111  void tensor_info_to_string(StreamType* os) {
112  const int dimension = tb_.ndim();
113  *os << "<" << infer_type_string(typeid(DType)) << " Tensor ";
114  *os << tb_.shape_[0];
115  for (int i = 1; i < dimension; ++i) {
116  *os << 'x' << tb_.shape_[i];
117  }
118  *os << ">" << std::endl;
119  }
120 
128  template <typename DType, typename StreamType>
129  void tensor_info_to_string(StreamType* os, const std::vector<index_t>& shape) {
130  const int dimension = shape.size();
131  *os << "<" << infer_type_string(typeid(DType)) << " Tensor ";
132  *os << shape[0];
133  for (int i = 1; i < dimension; ++i) {
134  *os << 'x' << shape[i];
135  }
136  *os << ">" << std::endl;
137  }
138 
  /*!
   * \brief print the whole tensor to a stream as nested bracketed lists
   *        (numpy-like), followed by a one-line shape/type summary
   * \tparam DType data type of the tensor elements
   * \tparam StreamType type of the stream object (e.g. std::cout)
   * \param os the output stream
   */
  template <typename DType, typename StreamType>
  void to_string_helper(StreamType* os) {
#if MXNET_USE_CUDA
    // GPU memory cannot be dereferenced from host code: copy to CPU and recurse.
    if (tb_.dev_mask() == gpu::kDevMask) {
      TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_).to_string_helper<DType>(os);
      return;
    }
#endif  // MXNET_USE_CUDA
    const int dimension = tb_.ndim();
    // offsets[k] = product of the sizes of the last k+1 axes. A flat index i
    // divisible by offsets[k] means a bracket boundary k+1 levels deep.
    std::vector<index_t> offsets;
    index_t multiple = 1;
    for (int i = dimension - 1; i >= 0; --i) {
      multiple *= tb_.shape_[i];
      offsets.push_back(multiple);
    }
    // Open all brackets and print the first element (shape is validated
    // non-empty by validate_shape() in the constructors).
    *os << std::string(dimension, '[');
    *os << tb_.dptr<DType>()[0];
    for (index_t i = 1; i < static_cast<index_t>(tb_.shape_.Size()); ++i) {
      // n = number of bracket levels that close (and reopen) before element i.
      int n = 0;
      for (auto off : offsets) {
        n += (i % off == 0);
      }
      if (n) {
        *os << std::string(n, ']') << ", " << std::string(n, '[');
      } else {
        *os << ", ";
      }
      *os << tb_.dptr<DType>()[i];
    }
    *os << std::string(dimension, ']') << std::endl;
    tensor_info_to_string<DType>(os);
  }
177 
185  template <typename DType, typename StreamType>
186  void to_string_helper(StreamType* os, const DType* dptr) {
187 #if MXNET_USE_CUDA
188  if (tb_.dev_mask() == gpu::kDevMask) {
189  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
190  .to_string_helper<DType>(os, dptr);
191  return;
192  }
193 #endif // MXNET_USE_CUDA
194  *os << *dptr << std::endl;
195  *os << "<" << typeid(*dptr).name() << ">" << std::endl;
196  }
197 
206  template <typename DType, typename StreamType>
207  void to_string_helper(StreamType* os, const std::vector<index_t>& sub_shape, index_t offset) {
208 #if MXNET_USE_CUDA
209  if (tb_.dev_mask() == gpu::kDevMask) {
210  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
211  .to_string_helper<DType>(os, sub_shape, offset);
212  return;
213  }
214 #endif // MXNET_USE_CUDA
215  DType* dptr = tb_.dptr<DType>() + offset;
216  if (sub_shape.size() == 0) {
217  to_string_helper<DType>(os, dptr);
218  return;
219  }
220  const int dimension = sub_shape.size();
221  std::vector<index_t> offsets;
222  index_t multiple = 1;
223  for (int i = dimension - 1; i >= 0; --i) {
224  multiple *= sub_shape[i];
225  offsets.push_back(multiple);
226  }
227  std::stringstream ss;
228  *os << std::string(dimension, '[');
229  *os << dptr[0];
230  for (index_t i = 1; i < multiple; ++i) {
231  int n = 0;
232  for (auto off : offsets) {
233  n += (i % off == 0);
234  }
235  if (n) {
236  *os << std::string(n, ']') << ", " << std::string(n, '[');
237  } else {
238  *os << ", ";
239  }
240  *os << dptr[i];
241  }
242  *os << std::string(dimension, ']') << std::endl;
243  tensor_info_to_string<DType>(os, sub_shape);
244  }
245 
253  void print_locator(const std::vector<index_t>& pos,
254  std::vector<index_t>* sub_shape,
255  index_t* offset) {
256  const int dimension = tb_.ndim();
257  const int sub_dim = dimension - pos.size();
258  sub_shape->resize(sub_dim);
259  index_t multiple = 1;
260  for (size_t i = pos.size(), j = 0; i < static_cast<size_t>(dimension); ++i, ++j) {
261  (*sub_shape)[j] = tb_.shape_[i];
262  multiple *= tb_.shape_[i];
263  }
264  index_t sum = 0;
265  index_t m = 1;
266  for (index_t i = pos.size() - 1; i >= 0; --i) {
267  sum += pos[i] * m;
268  m *= tb_.shape_[i];
269  }
270  *offset = sum * multiple;
271  }
272 
278  bool parse_position(std::vector<index_t>* pos, const std::string& str) {
279  const int dimension = tb_.ndim();
280  std::istringstream ss(str);
281  index_t n;
282  while (ss >> n) {
283  pos->push_back(n);
284  if (ss.peek() == ',') {
285  ss.ignore();
286  }
287  }
288  if (pos->size() > static_cast<size_t>(dimension)) {
289  return false;
290  }
291  for (size_t i = 0; i < pos->size(); ++i) {
292  if ((*pos)[i] > (tb_.shape_[i] - 1) || (*pos)[i] < 0) {
293  return false;
294  }
295  }
296  return !pos->empty();
297  }
298 
304  template <typename DType>
305  void interactive_print_helper(std::string tag) {
306 #if MXNET_USE_CUDA
307  if (tb_.dev_mask() == gpu::kDevMask) {
308  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
309  .interactive_print_helper<DType>(tag);
310  return;
311  }
312 #endif // MXNET_USE_CUDA
313  std::lock_guard<std::mutex> lock(InspectorManager::get()->mutex_);
315  while (!InspectorManager::get()->interactive_print_skip_all_) {
316  std::cout << "----------Interactive Print----------" << std::endl;
317  if (tag != "") {
318  std::cout << "Tag: " << tag
320  << std::endl;
321  }
322  tensor_info_to_string<DType>(&std::cout);
323  std::cout << "To print a part of the tensor, "
324  << "please specify a position, seperated by \",\"" << std::endl;
325  std::cout << "\"e\" for the entire tensor, "
326  << "\"d\" to dump value to file, "
327  << "\"b\" to break, "
328  << "\"s\" to skip all: ";
329  std::string str;
330  std::cin >> str;
331  if (str == "b") {
332  break;
333  } else if (str == "e") {
334  to_string_helper<DType>(&std::cout);
335  continue;
336  } else if (str == "s") {
338  break;
339  } else if (str == "d") {
340  while (true) {
341  std::cout << "Please enter a tag: ";
342  std::cin >> str;
343  if (str.find(' ') != std::string::npos) {
344  std::cout << "Invalid tag name. No space allowed.";
345  continue;
346  }
347  dump_to_file_helper<DType>(str);
348  break;
349  }
350  continue;
351  }
352  std::vector<index_t> pos;
353  if (parse_position(&pos, str)) {
354  std::vector<index_t> sub_shape;
355  index_t offset;
356  print_locator(pos, &sub_shape, &offset);
357  to_string_helper<DType>(&std::cout, sub_shape, offset);
358  } else {
359  std::cout << "invalid command/indices" << std::endl;
360  }
361  }
362  }
363 
  /*!
   * \brief build the predicate lambda for a built-in CheckerType
   * \tparam DType data type of the tensor elements
   * \param ct which built-in checker to build
   * \return a bool(DType) predicate; for float-only checkers instantiated with
   *         a non-float DType, a warning is logged and the predicate always
   *         returns false
   */
  template <typename DType>
  std::function<bool(DType)> get_checker(CheckerType ct) {
    switch (ct) {
      case NegativeChecker:
        return [](DType x) { return x < 0; };
      case PositiveChecker:
        return [](DType x) { return x > 0; };
      case ZeroChecker:
        return [](DType x) { return x == 0; };
      case NaNChecker:
        // NaN is the only value that compares unequal to itself (IEEE 754).
        if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
            std::is_same<DType, mshadow::half::half_t>::value) {
          return [](DType x) { return x != x; };
        } else {
          LOG(WARNING) << "NaNChecker only applies to float types. "
                       << "Lambda will always return false.";
        }
        break;
      case InfChecker:
        // (DType)1.0 / 0.0f evaluates to +inf under IEEE 754 semantics.
        if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
            std::is_same<DType, mshadow::half::half_t>::value) {
          return [](DType x) { return x == (DType)1.0 / 0.0f || x == -(DType)1.0 / 0.0f; };
        } else {
          LOG(WARNING) << "InfChecker only applies to float types. "
                       << "Lambda will always return false.";
        }
        break;
      case PositiveInfChecker:
        if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
            std::is_same<DType, mshadow::half::half_t>::value) {
          return [](DType x) { return x == (DType)1.0 / 0.0f; };
        } else {
          LOG(WARNING) << "PositiveInfChecker only applies to float types. "
                       << "Lambda will always return false.";
        }
        break;
      case NegativeInfChecker:
        if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
            std::is_same<DType, mshadow::half::half_t>::value) {
          return [](DType x) { return x == -(DType)1.0 / 0.0f; };
        } else {
          LOG(WARNING) << "NegativeInfChecker only applies to float types. "
                       << "Lambda will always return false.";
        }
        break;
      case FiniteChecker:
        // Finite = not +inf and not -inf. NOTE(review): NaN also passes this
        // test (NaN != inf), and non-float DTypes always get `false` even
        // though integers are trivially finite — both are the documented
        // behavior of this checker; confirm before changing.
        if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
            std::is_same<DType, mshadow::half::half_t>::value) {
          return [](DType x) { return x != (DType)1.0 / 0.0f && x != -(DType)1.0 / 0.0f; };
        } else {
          LOG(WARNING) << "FiniteChecker only applies to float types. "
                       << "Lambda will always return false.";
        }
        break;
      case NormalChecker:
        // Normal = finite and not NaN (x == x excludes NaN).
        if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
            std::is_same<DType, mshadow::half::half_t>::value) {
          return
              [](DType x) { return x != (DType)1.0 / 0.0f && x != -(DType)1.0 / 0.0f && x == x; };
        } else {
          LOG(WARNING) << "NormalChecker only applies to float types. "
                       << "Lambda will always return false.";
        }
        break;
      case AbnormalChecker:
        // Abnormal = +inf, -inf, or NaN.
        if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
            std::is_same<DType, mshadow::half::half_t>::value) {
          return
              [](DType x) { return x == (DType)1.0 / 0.0f || x == -(DType)1.0 / 0.0f || x != x; };
        } else {
          LOG(WARNING) << "AbnormalChecker only applies to float types. "
                       << "Lambda will always return false.";
        }
        break;
      default:
        return [](DType x) { return false; };
    }
    // Reached when a float-only checker was requested for a non-float DType.
    return [](DType x) { return false; };
  }
448 
453  std::vector<index_t> index_to_coordinates(index_t idx) {
454  const int dimension = tb_.ndim();
455  std::vector<index_t> ret;
456  for (int i = dimension - 1; i >= 0; --i) {
457  ret.push_back(idx % tb_.shape_[i]);
458  idx /= tb_.shape_[i];
459  }
460  std::reverse(ret.begin(), ret.end());
461  return ret;
462  }
463 
473  template <typename DType>
474  void check_value_helper(std::vector<std::vector<index_t>>* ret,
475  const std::function<bool(DType)>& checker,
476  bool interactive,
477  std::string tag) {
478 #if MXNET_USE_CUDA
479  if (tb_.dev_mask() == gpu::kDevMask) {
480  return TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
481  .check_value_helper<DType>(ret, checker, interactive, tag);
482  }
483 #endif // MXNET_USE_CUDA
484  index_t count = 0;
485  std::stringstream ss;
486  ss << "[";
487  bool first_pass = true;
488  for (index_t i = 0; i < static_cast<index_t>(tb_.shape_.Size()); ++i) {
489  if (checker(tb_.dptr<DType>()[i])) {
490  ++count;
491  if (!first_pass) {
492  ss << ", ";
493  }
494  first_pass = false;
495  std::vector<index_t> coords = index_to_coordinates(i);
496  ss << "(" << coords[0];
497  for (size_t i = 1; i < coords.size(); ++i) {
498  ss << ", " << coords[i];
499  }
500  ss << ")";
501  ret->push_back(coords);
502  }
503  }
504  ss << "]" << std::endl;
505  if (interactive) {
506  std::lock_guard<std::mutex> lock(InspectorManager::get()->mutex_);
508  while (!InspectorManager::get()->check_value_skip_all_) {
509  std::cout << "----------Value Check----------" << std::endl;
510  tensor_info_to_string<DType>(&std::cout);
511  if (tag != "") {
512  std::cout << "Tag: " << tag
513  << " Visit: " << InspectorManager::get()->check_value_tag_counter_[tag]
514  << std::endl;
515  }
516  std::cout << count << " value(s) found." << std::endl;
517  std::cout << "To print a part of the tensor,"
518  << " please specify a position, seperated by \",\"" << std::endl;
519  std::cout << "\"e\" for the entire tensor, "
520  << "\"p\" to print the coordinates of the values found, "
521  << "\"b\" to break, "
522  << "\"s\" to skip all: ";
523  std::string str;
524  std::cin >> str;
525  if (str == "b") {
526  break;
527  } else if (str == "e") {
528  to_string_helper<DType>(&std::cout);
529  continue;
530  } else if (str == "p") {
531  std::cout << ss.str() << std::endl;
532  continue;
533  } else if (str == "s") {
535  break;
536  }
537  std::vector<index_t> pos;
538  if (parse_position(&pos, str)) {
539  std::vector<index_t> sub_shape;
540  index_t offset;
541  print_locator(pos, &sub_shape, &offset);
542  to_string_helper<DType>(&std::cout, sub_shape, offset);
543  } else {
544  std::cout << "invalid command/indices" << std::endl;
545  }
546  }
547  }
548  }
549 
554  inline char infer_type(const std::type_info& ti) {
555  if (ti == typeid(float))
556  return 'f';
557  else if (ti == typeid(double))
558  return 'f';
559  else if (ti == typeid(mshadow::half::half_t))
560  return 'f';
561  else if (ti == typeid(uint8_t))
562  return 'u';
563  else if (ti == typeid(int32_t))
564  return 'i';
565  else if (ti == typeid(int64_t))
566  return 'i';
567  else
568  return '?';
569  }
570 
575  inline std::string infer_type_string(const std::type_info& ti) {
576  if (ti == typeid(float))
577  return "float";
578  else if (ti == typeid(double))
579  return "double";
580  else if (ti == typeid(mshadow::half::half_t))
581  return "mshasow::half::half_t";
582  else if (ti == typeid(uint8_t))
583  return "uint8_t";
584  else if (ti == typeid(int32_t))
585  return "int32_t";
586  else if (ti == typeid(int64_t))
587  return "int64_t";
588  else
589  return "unknown tyoe";
590  }
591 
595  inline char endian_test() {
596  int x = 1;
597  return (reinterpret_cast<char*>(&x)[0]) ? '<' : '>';
598  }
599 
  /*!
   * \brief build an npy format-1.0 header for this tensor:
   *        magic "\x93NUMPY" + version (1,0) + little-endian uint16 header
   *        length + the python dict describing dtype, order, and shape
   * \tparam DType data type of the tensor elements
   * \return the complete header bytes to prepend to the raw data
   */
  template <typename DType>
  std::string get_header() {
    const int dimension = tb_.ndim();
    // Header dict, e.g. {'descr':'<f4','fortran_order':False,'shape':(2,3)}
    std::string dict;
    dict += "{'descr':'";
    dict += endian_test();
    dict += infer_type(typeid(DType));
    dict += std::to_string(sizeof(DType));
    dict += "','fortran_order':False,'shape':(";
    dict += std::to_string(tb_.shape_[0]);
    for (int i = 1; i < dimension; ++i) {
      dict += ',';
      dict += std::to_string(tb_.shape_[i]);
    }
    if (dimension == 1) {
      // Python 1-tuples need a trailing comma: (n,)
      dict += ",";
    }
    dict += ")} ";
    // Pad with spaces so magic(6) + version(2) + header-len(2) + dict is a
    // multiple of 64 bytes, then terminate the dict with a newline as the npy
    // format requires.
    int padding_size = 64 - ((10 + dict.size()) % 64);
    dict += std::string(padding_size, ' ');
    dict.back() = '\n';
    std::string header;
    header += static_cast<char>(0x93);  // magic: \x93NUMPY
    header += "NUMPY";
    header += static_cast<char>(0x01);  // major version 1
    header += static_cast<char>(0x00);  // minor version 0
    // HEADER_LEN: dict size as little-endian uint16
    header += static_cast<char>((uint16_t)dict.size() & 0x00ff);
    header += static_cast<char>(((uint16_t)dict.size() >> 8) & 0x00ff);
    header += dict;
    return header;
  }
635 
642  template <typename DType>
643  void write_npy(const std::string& header, const std::string& filename) {
644  std::ofstream file;
645  file.exceptions(std::ofstream::failbit | std::ofstream::badbit);
646  try {
647  file.open(filename, std::ios::out | std::ios::binary);
648  file.write(header.c_str(), header.size());
649  file.write(reinterpret_cast<char*>(tb_.dptr<DType>()), sizeof(DType) * tb_.shape_.Size());
650  file.close();
651  std::cout << "Tensor dumped to file: " << filename << std::endl;
652  } catch (std::ofstream::failure e) {
653  std::cerr << "Exception opening/writing/closing file " << filename << std::endl;
654  }
655  }
656 
663  template <typename DType>
664  void dump_to_file_helper(const std::string& tag) {
665 #if MXNET_USE_CUDA
666  if (tb_.dev_mask() == gpu::kDevMask) {
667  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_).dump_to_file_helper<DType>(tag);
668  return;
669  }
670 #endif // MXNET_USE_CUDA
671  std::string header = get_header<DType>();
673  const int visit = InspectorManager::get()->dump_to_file_tag_counter_[tag];
674  std::string filename = tag + "_" + std::to_string(visit) + ".npy";
675  write_npy<DType>(header, filename);
676  }
677 
681  inline void validate_shape() {
682  const int dimension = tb_.ndim();
683  CHECK(dimension > 0) << "Tensor Inspector does not support empty tensors "
684  << "or tensors of unknow shape.";
685  for (int i = 0; i < dimension; ++i) {
686  CHECK(tb_.shape_[i] != 0) << "Invalid tensor shape: shape_[" << i << "] is 0";
687  }
688  }
689 
690  /* !\brief the tensor blob */
691  const TBlob tb_;
692  /* !\brief the run context of the tensor */
693  const RunContext& ctx_;
694 
695  public:
704  template <typename Device, int dimension, typename DType>
706  : tb_(ts), ctx_(ctx) {
707  validate_shape();
708  }
709 
  /*!
   * \brief construct from a TBlob object
   * \param tb the TBlob to inspect; the blob descriptor is copied into tb_
   *        (the underlying data buffer is not copied)
   * \param ctx the run context of the tensor; stored as a reference — the
   *        caller must keep it alive for the inspector's lifetime
   */
  TensorInspector(const TBlob& tb, const RunContext& ctx) : tb_(tb), ctx_(ctx) {
    validate_shape();
  }
718 
  /*!
   * \brief construct from an NDArray object, inspecting its dense data blob
   *        (arr.data()); NOTE(review): presumably requires kDefaultStorage —
   *        confirm before passing sparse arrays
   * \param arr the NDArray to inspect
   * \param ctx the run context of the tensor; stored as a reference — the
   *        caller must keep it alive for the inspector's lifetime
   */
  TensorInspector(const NDArray& arr, const RunContext& ctx) : tb_(arr.data()), ctx_(ctx) {
    validate_shape();
  }
727 
731  void print_string() {
732  std::cout << to_string() << std::endl;
733  }
734 
  /*!
   * \brief render the tensor's values and shape/type summary into a string
   * \return the formatted string
   */
  std::string to_string() {
    std::stringstream ss;
    // Dispatch on the runtime dtype flag to the statically-typed helper.
    MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, { to_string_helper<DType>(&ss); });
    return ss.str();
  }
743 
  /*!
   * \brief interactively inspect the tensor via std::cin/std::cout
   * \param tag optional tag to distinguish call sites; also keyed into the
   *        per-tag visit counter
   */
  void interactive_print(std::string tag = "") {
    // Dispatch on the runtime dtype flag to the statically-typed helper.
    MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, { interactive_print_helper<DType>(tag); });
  }
751 
760  template <typename ValueChecker>
761  std::vector<std::vector<index_t>> check_value(const ValueChecker& checker,
762  bool interactive = false,
763  std::string tag = "") {
764  std::vector<std::vector<index_t>> ret;
765  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
766  check_value_helper<DType>(&ret, checker, ret, interactive, tag);
767  });
768  return ret;
769  }
770 
778  std::vector<std::vector<index_t>> check_value(CheckerType ct,
779  bool interactive = false,
780  std::string tag = "") {
781  std::vector<std::vector<index_t>> ret;
782  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
783  check_value_helper<DType>(&ret, get_checker<DType>(ct), interactive, tag);
784  });
785  return ret;
786  }
787 
  /*!
   * \brief dump the tensor to a file named "tag_[visit count].npy" in npy
   *        format
   * \param tag file-name prefix, also keyed into the per-tag visit counter
   */
  void dump_to_file(std::string tag) {
    // Dispatch on the runtime dtype flag to the statically-typed helper.
    MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, { dump_to_file_helper<DType>(tag); });
  }
795 };
796 
797 } // namespace mxnet
798 
799 #endif // MXNET_COMMON_TENSOR_INSPECTOR_H_
mxnet
namespace of mxnet
Definition: api_registry.h:33
mxnet::ZeroChecker
@ ZeroChecker
Definition: tensor_inspector.h:73
mxnet::TensorInspector::TensorInspector
TensorInspector(const NDArray &arr, const RunContext &ctx)
construct from NDArray object. Currently this only works with kDefaultStorage
Definition: tensor_inspector.h:724
mxnet::InspectorManager::check_value_skip_all_
bool check_value_skip_all_
Definition: tensor_inspector.h:58
mxnet::InspectorManager::interactive_print_skip_all_
bool interactive_print_skip_all_
Definition: tensor_inspector.h:56
mxnet::InspectorManager
this singleton struct mediates individual TensorInspector objects so that we can control the global b...
Definition: tensor_inspector.h:42
mxnet::TensorInspector::TensorInspector
TensorInspector(const TBlob &tb, const RunContext &ctx)
construct from TBlob object
Definition: tensor_inspector.h:715
mxnet::TensorInspector
This class provides a unified interface to inspect the value of all data types including Tensor,...
Definition: tensor_inspector.h:102
mxnet::InspectorManager::interactive_print_tag_counter_
std::unordered_map< std::string, int > interactive_print_tag_counter_
Definition: tensor_inspector.h:60
mxnet::TBlob::dptr
DType * dptr() const
get pointer in dtype
Definition: tensor_blob.h:249
mxnet::TensorInspector::print_string
void print_string()
print the tensor to std::cout
Definition: tensor_inspector.h:731
mxnet::TensorInspector::to_string
std::string to_string()
return a string which contains the values and other info of the tensor
Definition: tensor_inspector.h:738
mxnet::common::cuda::rtc::util::to_string
std::string to_string(OpReqType req)
Convert OpReqType to string.
mshadow::Tensor
general tensor
Definition: tensor.h:525
mxnet::TensorInspector::dump_to_file
void dump_to_file(std::string tag)
dump the value of the tensor to a file with name "tag_[visit count].npy" in npy format
Definition: tensor_inspector.h:792
mxnet::RunContext
execution time context. The information needed in runtime for actual execution.
Definition: base.h:343
mxnet::common::cuda::rtc::lock
std::mutex lock
mxnet::TensorInspector::check_value
std::vector< std::vector< index_t > > check_value(CheckerType ct, bool interactive=false, std::string tag="")
check/validate the values within the tensor, return the coordinates where the lambda evaluates to tru...
Definition: tensor_inspector.h:778
mxnet::InfChecker
@ InfChecker
Definition: tensor_inspector.h:75
mxnet::TBlob::dev_mask
int dev_mask() const
device mask of the corresponding device
Definition: tensor_blob.h:257
mxnet::TensorInspector::TensorInspector
TensorInspector(const mshadow::Tensor< Device, dimension, DType > &ts, const RunContext &ctx)
construct from Tensor object
Definition: tensor_inspector.h:705
mxnet::CheckerType
CheckerType
Enum for building value checkers for TensorInspector::check_value()
Definition: tensor_inspector.h:70
mxnet::TBlob::ndim
int ndim(void) const
return number of dimension of the tensor inside
Definition: tensor_blob.h:231
mxnet::AbnormalChecker
@ AbnormalChecker
Definition: tensor_inspector.h:82
mxnet::NegativeChecker
@ NegativeChecker
Definition: tensor_inspector.h:71
mxnet::TBlob::type_flag_
int type_flag_
type flag of the tensor blob
Definition: tensor_blob.h:74
mxnet::InspectorManager::dump_to_file_tag_counter_
std::unordered_map< std::string, int > dump_to_file_tag_counter_
Definition: tensor_inspector.h:64
mxnet::NaNChecker
@ NaNChecker
Definition: tensor_inspector.h:74
mxnet::NDArray
ndarray interface
Definition: ndarray.h:82
mxnet::TBlob
tensor blob class that can be used to hold tensor of any dimension, any device and any data type,...
Definition: tensor_blob.h:65
mxnet::TensorInspector::check_value
std::vector< std::vector< index_t > > check_value(const ValueChecker &checker, bool interactive=false, std::string tag="")
check/validate the values within the tensor, return the coordinates where the value checker evaluates...
Definition: tensor_inspector.h:761
mxnet::InspectorManager::mutex_
std::mutex mutex_
Definition: tensor_inspector.h:54
mxnet::TShape::Size
size_t Size() const
Definition: tuple.h:523
mxnet::PositiveInfChecker
@ PositiveInfChecker
Definition: tensor_inspector.h:76
mxnet::index_t
mshadow::index_t index_t
index type usually use unsigned
Definition: base.h:81
mxnet::NormalChecker
@ NormalChecker
Definition: tensor_inspector.h:81
mxnet::TensorInspector::interactive_print
void interactive_print(std::string tag="")
interactively print the tensor value
Definition: tensor_inspector.h:748
mxnet::NegativeInfChecker
@ NegativeInfChecker
Definition: tensor_inspector.h:78
mxnet::FiniteChecker
@ FiniteChecker
Definition: tensor_inspector.h:80
mxnet::PositiveChecker
@ PositiveChecker
Definition: tensor_inspector.h:72
mxnet::TBlob::shape_
mxnet::TShape shape_
shape of the tensor
Definition: tensor_blob.h:72
mxnet::InspectorManager::get
static InspectorManager * get()
Definition: tensor_inspector.h:43
mshadow::gpu::kDevMask
static const int kDevMask
device flag number, identifies this device
Definition: tensor.h:50
mxnet::InspectorManager::check_value_tag_counter_
std::unordered_map< std::string, int > check_value_tag_counter_
Definition: tensor_inspector.h:62
MSHADOW_TYPE_SWITCH
#define MSHADOW_TYPE_SWITCH(type, DType,...)
Definition: base.h:1163