mxnet
tensor_inspector.h
1 /*
2  * Licensed to the Apache Software Foundation (ASF) under one
3  * or more contributor license agreements. See the NOTICE file
4  * distributed with this work for additional information
5  * regarding copyright ownership. The ASF licenses this file
6  * to you under the Apache License, Version 2.0 (the
7  * "License"); you may not use this file except in compliance
8  * with the License. You may obtain a copy of the License at
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing,
13  * software distributed under the License is distributed on an
14  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15  * KIND, either express or implied. See the License for the
16  * specific language governing permissions and limitations
17  * under the License.
18  */
19 
27 #ifndef MXNET_COMMON_TENSOR_INSPECTOR_H_
28 #define MXNET_COMMON_TENSOR_INSPECTOR_H_
29 
30 #include <algorithm>
31 #include <cmath>
32 #include <string>
33 #include <vector>
34 #include <fstream>
35 #include "../../3rdparty/mshadow/mshadow/base.h"
36 
37 namespace mxnet {
38 
43 struct InspectorManager {
44  static InspectorManager* get() {
45  static std::mutex mtx;
46  static std::unique_ptr<InspectorManager> im = nullptr;
47  if (!im) {
48  std::unique_lock<std::mutex> lk(mtx);
49  if (!im)
50  im = std::make_unique<InspectorManager>();
51  }
52  return im.get();
53  }
54  /* !\brief mutex used to lock interactive_print() and check_value() */
55  std::mutex mutex_;
56  /* !\brief skip all interactive prints */
57  bool interactive_print_skip_all_ = false;
58  /* !\brief skip all value checks */
59  bool check_value_skip_all_ = false;
60  /* !\brief visit count for interactive print tags */
61  std::unordered_map<std::string, int> interactive_print_tag_counter_;
62  /* !\brief visit count for check value tags */
63  std::unordered_map<std::string, int> check_value_tag_counter_;
64  /* !\brief visit count for dump value tags */
65  std::unordered_map<std::string, int> dump_to_file_tag_counter_;
66 };
67 
68 /*!
69  * \brief Enum for building value checkers for TensorInspector::check_value()
70  */
71 enum CheckerType {
72  NegativeChecker, // check if is negative
73  PositiveChecker, // check if is positive
74  ZeroChecker, // check if is zero
75  NaNChecker, // check if is NaN, will always return false if DType is not a float type
76  InfChecker, // check if is infinity, will always return false if DType is not a float type
77  PositiveInfChecker, // check if is positive infinity,
78  // will always return false if DType is not a float type
79  NegativeInfChecker, // check if is negative infinity,
80  // will always return false if DType is not a float type
81  FiniteChecker, // check if is finite, will always return false if DType is not a float type
82  NormalChecker, // check if is neither infinity nor NaN
83  AbnormalChecker, // check if is infinity or NaN
84 };
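// Added note (not in the original header): a CheckerType value is passed to
// TensorInspector::check_value() declared below; e.g., for some TBlob tb and RunContext ctx,
// TensorInspector(tb, ctx).check_value(NegativeChecker, true) interactively reports the
// coordinates of every negative element. See the usage sketch after the end of this file.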
85 
103 class TensorInspector {
104  private:
111  template<typename DType, typename StreamType>
112  void tensor_info_to_string(StreamType* os) {
113  const int dimension = tb_.ndim();
114  *os << "<" << infer_type_string(typeid(DType)) << " Tensor ";
115  *os << tb_.shape_[0];
116  for (int i = 1; i < dimension; ++i) {
117  *os << 'x' << tb_.shape_[i];
118  }
119  *os << ">" << std::endl;
120  }
121 
129  template<typename DType, typename StreamType>
130  void tensor_info_to_string(StreamType* os, const std::vector<index_t>& shape) {
131  const int dimension = shape.size();
132  *os << "<" << infer_type_string(typeid(DType)) << " Tensor ";
133  *os << shape[0];
134  for (int i = 1; i < dimension; ++i) {
135  *os << 'x' << shape[i];
136  }
137  *os << ">" << std::endl;
138  }
139 
146  template<typename DType, typename StreamType>
147  void to_string_helper(StreamType* os) {
148 #if MXNET_USE_CUDA
149  if (tb_.dev_mask() == gpu::kDevMask) {
150  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
151  .to_string_helper<DType>(os);
152  return;
153  }
154 #endif // MXNET_USE_CUDA
155  const int dimension = tb_.ndim();
156  std::vector<index_t> offsets;
157  index_t multiple = 1;
158  for (int i = dimension - 1; i >= 0; --i) {
159  multiple *= tb_.shape_[i];
160  offsets.push_back(multiple);
161  }
162  *os << std::string(dimension, '[');
163  *os << tb_.dptr<DType>()[0];
164  for (index_t i = 1; i < static_cast<index_t>(tb_.shape_.Size()); ++i) {
165  int n = 0;
166  for (auto off : offsets) {
167  n += (i % off == 0);
168  }
169  if (n) {
170  *os << std::string(n, ']') << ", " << std::string(n, '[');
171  } else {
172  *os << ", ";
173  }
174  *os << tb_.dptr<DType>()[i];
175  }
176  *os << std::string(dimension, ']') << std::endl;
177  tensor_info_to_string<DType>(os);
178  }
179 
187  template<typename DType, typename StreamType>
188  void to_string_helper(StreamType* os, const DType* dptr) {
189 #if MXNET_USE_CUDA
190  if (tb_.dev_mask() == gpu::kDevMask) {
191  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
192  .to_string_helper<DType>(os, dptr);
193  return;
194  }
195 #endif // MXNET_USE_CUDA
196  *os << *dptr << std::endl;
197  *os << "<" << typeid(*dptr).name() << ">" << std::endl;
198  }
199 
208  template<typename DType, typename StreamType>
209  void to_string_helper(StreamType* os, const std::vector<index_t>& sub_shape, index_t offset) {
210 #if MXNET_USE_CUDA
211  if (tb_.dev_mask() == gpu::kDevMask) {
212  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
213  .to_string_helper<DType>(os, sub_shape, offset);
214  return;
215  }
216 #endif // MXNET_USE_CUDA
217  DType* dptr = tb_.dptr<DType>() + offset;
218  if (sub_shape.size() == 0) {
219  to_string_helper<DType>(os, dptr);
220  return;
221  }
222  const int dimension = sub_shape.size();
223  std::vector<index_t> offsets;
224  index_t multiple = 1;
225  for (int i = dimension - 1; i >= 0; --i) {
226  multiple *= sub_shape[i];
227  offsets.push_back(multiple);
228  }
229  std::stringstream ss;
230  *os << std::string(dimension, '[');
231  *os << dptr[0];
232  for (index_t i = 1; i < multiple; ++i) {
233  int n = 0;
234  for (auto off : offsets) {
235  n += (i % off == 0);
236  }
237  if (n) {
238  *os << std::string(n, ']') << ", " << std::string(n, '[');
239  } else {
240  *os << ", ";
241  }
242  *os << dptr[i];
243  }
244  *os << std::string(dimension, ']') << std::endl;
245  tensor_info_to_string<DType>(os, sub_shape);
246  }
247 
255  void print_locator(const std::vector<index_t>& pos, std::vector<index_t>* sub_shape,
256  index_t* offset) {
257  const int dimension = tb_.ndim();
258  const int sub_dim = dimension - pos.size();
259  sub_shape->resize(sub_dim);
260  index_t multiple = 1;
261  for (size_t i = pos.size(), j = 0; i < static_cast<size_t>(dimension); ++i, ++j) {
262  (*sub_shape)[j] = tb_.shape_[i];
263  multiple *= tb_.shape_[i];
264  }
265  index_t sum = 0;
266  index_t m = 1;
267  for (int i = static_cast<int>(pos.size()) - 1; i >= 0; --i) {
268  sum += pos[i] * m;
269  m *= tb_.shape_[i];
270  }
271  *offset = sum * multiple;
272  }
273 
280  bool parse_position(std::vector<index_t>* pos, const std::string& str) {
281  const int dimension = tb_.ndim();
282  std::istringstream ss(str);
283  index_t n;
284  while (ss >> n) {
285  pos->push_back(n);
286  if (ss.peek() == ',') {
287  ss.ignore();
288  }
289  }
290  if (pos->size() > static_cast<size_t>(dimension)) {
291  return false;
292  }
293  for (size_t i = 0; i < pos->size(); ++i) {
294  if ((*pos)[i] > (tb_.shape_[i] - 1) || (*pos)[i] < 0) {
295  return false;
296  }
297  }
298  return !pos->empty();
299  }
300 
306  template<typename DType>
307  void interactive_print_helper(std::string tag) {
308 #if MXNET_USE_CUDA
309  if (tb_.dev_mask() == gpu::kDevMask) {
310  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
311  .interactive_print_helper<DType>(tag);
312  return;
313  }
314 #endif // MXNET_USE_CUDA
315  std::lock_guard<std::mutex> lock(InspectorManager::get()->mutex_);
316  InspectorManager::get()->interactive_print_tag_counter_[tag] += 1;
317  while (!InspectorManager::get()->interactive_print_skip_all_) {
318  std::cout << "----------Interactive Print----------" << std::endl;
319  if (tag != "") {
320  std::cout << "Tag: " << tag << " Visit: " <<
321  InspectorManager::get()->interactive_print_tag_counter_[tag] << std::endl;
322  }
323  tensor_info_to_string<DType>(&std::cout);
324  std::cout << "To print a part of the tensor, " <<
325  "please specify a position, separated by \",\"" << std::endl;
326  std::cout << "\"e\" for the entire tensor, " <<
327  "\"d\" to dump value to file, " <<
328  "\"b\" to break, " <<
329  "\"s\" to skip all: ";
330  std::string str;
331  std::cin >> str;
332  if (str == "b") {
333  break;
334  } else if (str == "e") {
335  to_string_helper<DType>(&std::cout);
336  continue;
337  } else if (str == "s") {
338  InspectorManager::get()->interactive_print_skip_all_ = true;
339  break;
340  } else if (str == "d") {
341  while (true) {
342  std::cout << "Please enter a tag: ";
343  std::cin >> str;
344  if (str.find(' ') != std::string::npos) {
345  std::cout << "Invalid tag name. No space allowed.";
346  continue;
347  }
348  dump_to_file_helper<DType>(str);
349  break;
350  }
351  continue;
352  }
353  std::vector<index_t> pos;
354  if (parse_position(&pos, str)) {
355  std::vector<index_t> sub_shape;
356  index_t offset;
357  print_locator(pos, &sub_shape, &offset);
358  to_string_helper<DType>(&std::cout, sub_shape, offset);
359  } else {
360  std::cout << "invalid command/indices" << std::endl;
361  }
362  }
363  }
364 
370  template<typename DType>
371  std::function<bool(DType)> get_checker(CheckerType ct) {
372  switch (ct) {
373  case NegativeChecker:
374  return [] (DType x) {
375  return x < 0;
376  };
377  case PositiveChecker:
378  return [] (DType x) {
379  return x > 0;
380  };
381  case ZeroChecker:
382  return [] (DType x) {
383  return x == 0;
384  };
385  case NaNChecker:
386  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
387  std::is_same<DType, mshadow::half::half_t>::value) {
388  return [] (DType x) {
389  return x != x;
390  };
391  } else {
392  LOG(WARNING) << "NaNChecker only applies to float types. " <<
393  "Lambda will always return false.";
394  }
395  break;
396  case InfChecker:
397  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
398  std::is_same<DType, mshadow::half::half_t>::value) {
399  return [] (DType x) {
400  return x == (DType)1.0 / 0.0f || x == -(DType)1.0 / 0.0f;
401  };
402  } else {
403  LOG(WARNING) << "InfChecker only applies to float types. " <<
404  "Lambda will always return false.";
405  }
406  break;
407  case PositiveInfChecker:
408  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
409  std::is_same<DType, mshadow::half::half_t>::value) {
410  return [] (DType x) {
411  return x == (DType)1.0 / 0.0f;
412  };
413  } else {
414  LOG(WARNING) << "PositiveInfChecker only applies to float types. " <<
415  "Lambda will always return false.";
416  }
417  break;
418  case NegativeInfChecker:
419  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
420  std::is_same<DType, mshadow::half::half_t>::value) {
421  return [] (DType x) {
422  return x == -(DType)1.0 / 0.0f;
423  };
424  } else {
425  LOG(WARNING) << "NegativeInfChecker only applies to float types. " <<
426  "Lambda will always return false.";
427  }
428  break;
429  case FiniteChecker:
430  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
431  std::is_same<DType, mshadow::half::half_t>::value) {
432  return [] (DType x) {
433  return x != (DType)1.0 / 0.0f && x != -(DType)1.0 / 0.0f;
434  };
435  } else {
436  LOG(WARNING) << "FiniteChecker only applies to float types. " <<
437  "Lambda will always return false.";
438  }
439  break;
440  case NormalChecker:
441  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
442  std::is_same<DType, mshadow::half::half_t>::value) {
443  return [] (DType x) {
444  return x != (DType)1.0 / 0.0f && x != -(DType)1.0 / 0.0f &&
445  x == x;
446  };
447  } else {
448  LOG(WARNING) << "NormalChecker only applies to float types. " <<
449  "Lambda will always return false.";
450  }
451  break;
452  case AbnormalChecker:
453  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
454  std::is_same<DType, mshadow::half::half_t>::value) {
455  return [] (DType x) {
456  return x == (DType)1.0 / 0.0f || x == -(DType)1.0 / 0.0f ||
457  x != x;
458  };
459  } else {
460  LOG(WARNING) << "AbnormalChecker only applies to float types. " <<
461  "Lambda will always return false.";
462  }
463  break;
464  default:
465  return [] (DType x) {
466  return false;
467  };
468  }
469  return [] (DType x) {return false;};
470  }
471 
476  std::vector<index_t> index_to_coordinates(index_t idx) {
477  const int dimension = tb_.ndim();
478  std::vector<index_t> ret;
479  for (int i = dimension - 1; i >= 0; --i) {
480  ret.push_back(idx % tb_.shape_[i]);
481  idx /= tb_.shape_[i];
482  }
483  std::reverse(ret.begin(), ret.end());
484  return ret;
485  }
486 
496  template<typename DType>
497  void check_value_helper(std::vector<std::vector<index_t>>* ret,
498  const std::function<bool(DType)>& checker, bool interactive, std::string tag) {
499 #if MXNET_USE_CUDA
500  if (tb_.dev_mask() == gpu::kDevMask) {
501  return TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
502  .check_value_helper<DType>(ret, checker, interactive, tag);
503  }
504 #endif // MXNET_USE_CUDA
505  index_t count = 0;
506  std::stringstream ss;
507  ss << "[";
508  bool first_pass = true;
509  for (index_t i = 0; i < static_cast<index_t>(tb_.shape_.Size()); ++i) {
510  if (checker(tb_.dptr<DType>()[i])) {
511  ++count;
512  if (!first_pass) {
513  ss << ", ";
514  }
515  first_pass = false;
516  std::vector<index_t> coords = index_to_coordinates(i);
517  ss << "(" << coords[0];
518  for (size_t i = 1; i < coords.size(); ++i) {
519  ss << ", " << coords[i];
520  }
521  ss << ")";
522  ret->push_back(coords);
523  }
524  }
525  ss << "]" << std::endl;
526  if (interactive) {
527  std::lock_guard<std::mutex> lock(InspectorManager::get()->mutex_);
528  InspectorManager::get()->check_value_tag_counter_[tag] += 1;
529  while (!InspectorManager::get()->check_value_skip_all_) {
530  std::cout << "----------Value Check----------" << std::endl;
531  tensor_info_to_string<DType>(&std::cout);
532  if (tag != "") {
533  std::cout << "Tag: " << tag << " Visit: " <<
534  InspectorManager::get()->check_value_tag_counter_[tag] << std::endl;
535  }
536  std::cout << count << " value(s) found." << std::endl;
537  std::cout << "To print a part of the tensor," <<
538  " please specify a position, separated by \",\"" << std::endl;
539  std::cout << "\"e\" for the entire tensor, " <<
540  "\"p\" to print the coordinates of the values found, " <<
541  "\"b\" to break, " <<
542  "\"s\" to skip all: ";
543  std::string str;
544  std::cin >> str;
545  if (str == "b") {
546  break;
547  } else if (str == "e") {
548  to_string_helper<DType>(&std::cout);
549  continue;
550  } else if (str == "p") {
551  std::cout << ss.str() << std::endl;
552  continue;
553  } else if (str == "s") {
554  InspectorManager::get()->check_value_skip_all_ = true;
555  break;
556  }
557  std::vector<index_t> pos;
558  if (parse_position(&pos, str)) {
559  std::vector<index_t> sub_shape;
560  index_t offset;
561  print_locator(pos, &sub_shape, &offset);
562  to_string_helper<DType>(&std::cout, sub_shape, offset);
563  } else {
564  std::cout << "invalid command/indices" << std::endl;
565  }
566  }
567  }
568  }
569 
574  inline char infer_type(const std::type_info& ti) {
575  if (ti == typeid(float)) return 'f';
576  else if (ti == typeid(double)) return 'f';
577  else if (ti == typeid(mshadow::half::half_t) ) return 'f';
578  else if (ti == typeid(uint8_t)) return 'u';
579  else if (ti == typeid(int32_t)) return 'i';
580  else if (ti == typeid(int64_t)) return 'i';
581  else
582  return '?';
583  }
584 
589  inline std::string infer_type_string(const std::type_info& ti) {
590  if (ti == typeid(float)) return "float";
591  else if (ti == typeid(double)) return "double";
592  else if (ti == typeid(mshadow::half::half_t) ) return "mshadow::half::half_t";
593  else if (ti == typeid(uint8_t)) return "uint8_t";
594  else if (ti == typeid(int32_t)) return "int32_t";
595  else if (ti == typeid(int64_t)) return "int64_t";
596  else
597  return "unknown type";
598  }
599 
603  inline char endian_test() {
604  int x = 1;
605  return (reinterpret_cast<char*>(&x)[0]) ? '<' : '>';
606  }
607 
612  template<typename DType>
613  std::string get_header() {
614  const int dimension = tb_.ndim();
615  std::string dict;
616  dict += "{'descr':'";
617  dict += endian_test();
618  dict += infer_type(typeid(DType));
619  dict += std::to_string(sizeof(DType));
620  dict += "','fortran_order':False,'shape':(";
621  dict += std::to_string(tb_.shape_[0]);
622  for (int i = 1; i < dimension; ++i) {
623  dict += ',';
624  dict += std::to_string(tb_.shape_[i]);
625  }
626  if (dimension == 1) {
627  dict += ",";
628  }
629  dict += ")} ";
630  int padding_size = 64 - ((10 + dict.size()) % 64);
631  dict += std::string(padding_size, ' ');
632  dict.back() = '\n';
633  std::string header;
634  header += static_cast<char>(0x93);
635  header += "NUMPY";
636  header += static_cast<char>(0x01);
637  header += static_cast<char>(0x00);
638  header += static_cast<char>((uint16_t)dict.size() & 0x00ff);
639  header += static_cast<char>(((uint16_t)dict.size() >> 8) & 0x00ff);
640  header += dict;
641  return header;
642  }
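// Added worked example (not in the original source): for a 3x4 float tensor on a
// little-endian machine, the dict built above reads
// "{'descr':'<f4','fortran_order':False,'shape':(3,4)} ", padded with spaces (last byte '\n')
// so that 10 + dict.size() is a multiple of 64; the returned header is "\x93NUMPY", the
// version bytes 0x01 0x00, the 2-byte little-endian dict length, then the dict itself,
// which matches the .npy format (version 1.0) that numpy.load() expects.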
643 
650  template<typename DType>
651  void write_npy(const std::string& header, const std::string& filename) {
652  std::ofstream file;
653  file.exceptions(std::ofstream::failbit | std::ofstream::badbit);
654  try {
655  file.open(filename, std::ios::out | std::ios::binary);
656  file.write(header.c_str(), header.size());
657  file.write(reinterpret_cast<char*>(tb_.dptr<DType>()), sizeof(DType) * tb_.shape_.Size());
658  file.close();
659  std::cout << "Tensor dumped to file: " << filename << std::endl;
660  } catch (const std::ofstream::failure& e) {
661  std::cerr << "Exception opening/writing/closing file " << filename << std::endl;
662  }
663  }
664 
671  template<typename DType>
672  void dump_to_file_helper(const std::string& tag) {
673 #if MXNET_USE_CUDA
674  if (tb_.dev_mask() == gpu::kDevMask) {
675  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
676  .dump_to_file_helper<DType>(tag);
677  return;
678  }
679 #endif // MXNET_USE_CUDA
680  std::string header = get_header<DType>();
681  InspectorManager::get()->dump_to_file_tag_counter_[tag] += 1;
682  const int visit = InspectorManager::get()->dump_to_file_tag_counter_[tag];
683  std::string filename = tag + "_" + std::to_string(visit) + ".npy";
684  write_npy<DType>(header, filename);
685  }
686 
690  inline void validate_shape() {
691  const int dimension = tb_.ndim();
692  CHECK(dimension > 0) << "Tensor Inspector does not support empty tensors " <<
693  "or tensors of unknown shape.";
694  for (int i = 0; i < dimension; ++i) {
695  CHECK(tb_.shape_[i] != 0) << "Invalid tensor shape: shape_[" << i << "] is 0";
696  }
697  }
698 
699  /* !\brief the tensor blob */
700  const TBlob tb_;
701  /* !\brief the run context of the tensor */
702  const RunContext& ctx_;
703 
704  public:
713  template<typename Device, int dimension, typename DType>
714  TensorInspector(const mshadow::Tensor<Device, dimension, DType>& ts, const RunContext& ctx):
715  tb_(ts), ctx_(ctx) {
716  validate_shape();
717  }
718 
724  TensorInspector(const TBlob& tb, const RunContext& ctx):
725  tb_(tb), ctx_(ctx) {
726  validate_shape();
727  }
728 
734  TensorInspector(const NDArray& arr, const RunContext& ctx):
735  tb_(arr.data()), ctx_(ctx) {
736  validate_shape();
737  }
738 
742  void print_string() {
743  std::cout << to_string() << std::endl;
744  }
745 
749  std::string to_string() {
750  std::stringstream ss;
751  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
752  to_string_helper<DType>(&ss);
753  });
754  return ss.str();
755  }
756 
761  void interactive_print(std::string tag = "") {
762  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
763  interactive_print_helper<DType>(tag);
764  });
765  }
766 
775  template<typename ValueChecker>
776  std::vector<std::vector<index_t>> check_value(const ValueChecker& checker,
777  bool interactive = false, std::string tag = "") {
778  std::vector<std::vector<index_t>> ret;
779  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
780  check_value_helper<DType>(&ret, checker, interactive, tag);
781  });
782  return ret;
783  }
784 
792  std::vector<std::vector<index_t>> check_value(CheckerType ct, bool interactive = false,
793  std::string tag = "") {
794  std::vector<std::vector<index_t>> ret;
795  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
796  check_value_helper<DType>(&ret, get_checker<DType>(ct), interactive, tag);
797  });
798  return ret;
799  }
800 
805  void dump_to_file(std::string tag) {
806  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
807  dump_to_file_helper<DType>(tag);
808  });
809  }
810 };
811 
812 } // namespace mxnet
813 
814 #endif // MXNET_COMMON_TENSOR_INSPECTOR_H_
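Usage sketch (added for illustration; not part of the header). The snippet below assumes an operator implementation that already has a TBlob named out_data and a RunContext named run_ctx; the function name, tag string, and include path are hypothetical. It only exercises the public API declared above.

#include <iostream>
#include <vector>
#include "tensor_inspector.h"  // hypothetical include path; adjust to your source tree

void InspectConvOutput(const mxnet::TBlob& out_data, const mxnet::RunContext& run_ctx) {
  using namespace mxnet;
  // Print the full tensor followed by a "<dtype Tensor d1xd2x...>" summary line.
  TensorInspector(out_data, run_ctx).print_string();
  // Pause and inspect parts of the tensor from stdin; the tag keys a per-tag visit counter.
  TensorInspector(out_data, run_ctx).interactive_print("conv_fwd_out");
  // Collect the coordinates of abnormal values (Inf or NaN) without any interaction.
  std::vector<std::vector<index_t>> bad =
      TensorInspector(out_data, run_ctx).check_value(AbnormalChecker, false, "conv_fwd_out");
  std::cout << bad.size() << " abnormal value(s) found" << std::endl;
  // Dump the tensor to "conv_fwd_out_<visit count>.npy", loadable with numpy.load().
  TensorInspector(out_data, run_ctx).dump_to_file("conv_fwd_out");
}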